3 #include "refs-internal.h"
5 #include "../iterator.h"
6 #include "../dir-iterator.h"
7 #include "../lockfile.h"
14 struct object_id old_oid
;
18 * Return true if refname, which has the specified oid and flags, can
19 * be resolved to an object in the database. If the referred-to object
20 * does not exist, emit a warning and return false.
22 static int ref_resolves_to_object(const char *refname
,
23 const struct object_id
*oid
,
26 if (flags
& REF_ISBROKEN
)
28 if (!has_sha1_file(oid
->hash
)) {
29 error("%s does not point to a valid object!", refname
);
35 struct packed_ref_cache
{
36 struct ref_cache
*cache
;
39 * Count of references to the data structure in this instance,
40 * including the pointer from files_ref_store::packed if any.
41 * The data will not be freed as long as the reference count
44 unsigned int referrers
;
46 /* The metadata from when this packed-refs cache was read */
47 struct stat_validity validity
;
51 * Future: need to be in "struct repository"
52 * when doing a full libification.
54 struct files_ref_store
{
55 struct ref_store base
;
56 unsigned int store_flags
;
60 char *packed_refs_path
;
62 struct ref_cache
*loose
;
63 struct packed_ref_cache
*packed
;
66 * Lock used for the "packed-refs" file. Note that this (and
67 * thus the enclosing `files_ref_store`) must not be freed.
69 struct lock_file packed_refs_lock
;
73 * Increment the reference count of *packed_refs.
75 static void acquire_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
77 packed_refs
->referrers
++;
81 * Decrease the reference count of *packed_refs. If it goes to zero,
82 * free *packed_refs and return true; otherwise return false.
84 static int release_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
86 if (!--packed_refs
->referrers
) {
87 free_ref_cache(packed_refs
->cache
);
88 stat_validity_clear(&packed_refs
->validity
);
96 static void clear_packed_ref_cache(struct files_ref_store
*refs
)
99 struct packed_ref_cache
*packed_refs
= refs
->packed
;
101 if (is_lock_file_locked(&refs
->packed_refs_lock
))
102 die("BUG: packed-ref cache cleared while locked");
104 release_packed_ref_cache(packed_refs
);
/*
 * Free the in-memory cache of loose references held by this ref store.
 * NOTE(review): fragmentary extraction — the enclosing braces and any
 * guard/reset around refs->loose are not visible here; confirm against
 * the full source before assuming refs->loose may be NULL afterward.
 */
108 static void clear_loose_ref_cache(struct files_ref_store
*refs
)
111 	free_ref_cache(refs
->loose
);
117 * Create a new submodule ref cache and add it to the internal
120 static struct ref_store
*files_ref_store_create(const char *gitdir
,
123 struct files_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
124 struct ref_store
*ref_store
= (struct ref_store
*)refs
;
125 struct strbuf sb
= STRBUF_INIT
;
127 base_ref_store_init(ref_store
, &refs_be_files
);
128 refs
->store_flags
= flags
;
130 refs
->gitdir
= xstrdup(gitdir
);
131 get_common_dir_noenv(&sb
, gitdir
);
132 refs
->gitcommondir
= strbuf_detach(&sb
, NULL
);
133 strbuf_addf(&sb
, "%s/packed-refs", refs
->gitcommondir
);
134 refs
->packed_refs_path
= strbuf_detach(&sb
, NULL
);
140 * Die if refs is not the main ref store. caller is used in any
141 * necessary error messages.
143 static void files_assert_main_repository(struct files_ref_store
*refs
,
146 if (refs
->store_flags
& REF_STORE_MAIN
)
149 die("BUG: operation %s only allowed for main ref store", caller
);
153 * Downcast ref_store to files_ref_store. Die if ref_store is not a
154 * files_ref_store. required_flags is compared with ref_store's
155 * store_flags to ensure the ref_store has all required capabilities.
156 * "caller" is used in any necessary error messages.
158 static struct files_ref_store
*files_downcast(struct ref_store
*ref_store
,
159 unsigned int required_flags
,
162 struct files_ref_store
*refs
;
164 if (ref_store
->be
!= &refs_be_files
)
165 die("BUG: ref_store is type \"%s\" not \"files\" in %s",
166 ref_store
->be
->name
, caller
);
168 refs
= (struct files_ref_store
*)ref_store
;
170 if ((refs
->store_flags
& required_flags
) != required_flags
)
171 die("BUG: operation %s requires abilities 0x%x, but only have 0x%x",
172 caller
, required_flags
, refs
->store_flags
);
177 /* The length of a peeled reference line in packed-refs, including EOL: */
178 #define PEELED_LINE_LENGTH 42
181 * The packed-refs header line that we write out. Perhaps other
182 * traits will be added later. The trailing space is required.
184 static const char PACKED_REFS_HEADER
[] =
185 "# pack-refs with: peeled fully-peeled \n";
188 * Parse one line from a packed-refs file. Write the SHA1 to sha1.
189 * Return a pointer to the refname within the line (null-terminated),
190 * or NULL if there was a problem.
192 static const char *parse_ref_line(struct strbuf
*line
, struct object_id
*oid
)
196 if (parse_oid_hex(line
->buf
, oid
, &ref
) < 0)
198 if (!isspace(*ref
++))
204 if (line
->buf
[line
->len
- 1] != '\n')
206 line
->buf
[--line
->len
] = 0;
212 * Read from `packed_refs_file` into a newly-allocated
213 * `packed_ref_cache` and return it. The return value will already
214 * have its reference count incremented.
216 * A comment line of the form "# pack-refs with: " may contain zero or
217 * more traits. We interpret the traits as follows:
221 * Probably no references are peeled. But if the file contains a
222 * peeled value for a reference, we will use it.
226 * References under "refs/tags/", if they *can* be peeled, *are*
227 * peeled in this file. References outside of "refs/tags/" are
228 * probably not peeled even if they could have been, but if we find
229 * a peeled value for such a reference we will use it.
233 * All references in the file that can be peeled are peeled.
234 * Inversely (and this is more important), any references in the
235 * file for which no peeled value is recorded is not peelable. This
236 * trait should typically be written alongside "peeled" for
237 * compatibility with older clients, but we do not require it
238 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
240 static struct packed_ref_cache
*read_packed_refs(const char *packed_refs_file
)
243 struct packed_ref_cache
*packed_refs
= xcalloc(1, sizeof(*packed_refs
));
244 struct ref_entry
*last
= NULL
;
245 struct strbuf line
= STRBUF_INIT
;
246 enum { PEELED_NONE
, PEELED_TAGS
, PEELED_FULLY
} peeled
= PEELED_NONE
;
249 acquire_packed_ref_cache(packed_refs
);
250 packed_refs
->cache
= create_ref_cache(NULL
, NULL
);
251 packed_refs
->cache
->root
->flag
&= ~REF_INCOMPLETE
;
253 f
= fopen(packed_refs_file
, "r");
255 if (errno
== ENOENT
) {
257 * This is OK; it just means that no
258 * "packed-refs" file has been written yet,
259 * which is equivalent to it being empty.
263 die_errno("couldn't read %s", packed_refs_file
);
267 stat_validity_update(&packed_refs
->validity
, fileno(f
));
269 dir
= get_ref_dir(packed_refs
->cache
->root
);
270 while (strbuf_getwholeline(&line
, f
, '\n') != EOF
) {
271 struct object_id oid
;
275 if (skip_prefix(line
.buf
, "# pack-refs with:", &traits
)) {
276 if (strstr(traits
, " fully-peeled "))
277 peeled
= PEELED_FULLY
;
278 else if (strstr(traits
, " peeled "))
279 peeled
= PEELED_TAGS
;
280 /* perhaps other traits later as well */
284 refname
= parse_ref_line(&line
, &oid
);
286 int flag
= REF_ISPACKED
;
288 if (check_refname_format(refname
, REFNAME_ALLOW_ONELEVEL
)) {
289 if (!refname_is_safe(refname
))
290 die("packed refname is dangerous: %s", refname
);
292 flag
|= REF_BAD_NAME
| REF_ISBROKEN
;
294 last
= create_ref_entry(refname
, &oid
, flag
, 0);
295 if (peeled
== PEELED_FULLY
||
296 (peeled
== PEELED_TAGS
&& starts_with(refname
, "refs/tags/")))
297 last
->flag
|= REF_KNOWS_PEELED
;
298 add_ref_entry(dir
, last
);
302 line
.buf
[0] == '^' &&
303 line
.len
== PEELED_LINE_LENGTH
&&
304 line
.buf
[PEELED_LINE_LENGTH
- 1] == '\n' &&
305 !get_oid_hex(line
.buf
+ 1, &oid
)) {
306 oidcpy(&last
->u
.value
.peeled
, &oid
);
308 * Regardless of what the file header said,
309 * we definitely know the value of *this*
312 last
->flag
|= REF_KNOWS_PEELED
;
317 strbuf_release(&line
);
322 static const char *files_packed_refs_path(struct files_ref_store
*refs
)
324 return refs
->packed_refs_path
;
327 static void files_reflog_path(struct files_ref_store
*refs
,
333 * FIXME: of course this is wrong in multi worktree
334 * setting. To be fixed real soon.
336 strbuf_addf(sb
, "%s/logs", refs
->gitcommondir
);
340 switch (ref_type(refname
)) {
341 case REF_TYPE_PER_WORKTREE
:
342 case REF_TYPE_PSEUDOREF
:
343 strbuf_addf(sb
, "%s/logs/%s", refs
->gitdir
, refname
);
345 case REF_TYPE_NORMAL
:
346 strbuf_addf(sb
, "%s/logs/%s", refs
->gitcommondir
, refname
);
349 die("BUG: unknown ref type %d of ref %s",
350 ref_type(refname
), refname
);
354 static void files_ref_path(struct files_ref_store
*refs
,
358 switch (ref_type(refname
)) {
359 case REF_TYPE_PER_WORKTREE
:
360 case REF_TYPE_PSEUDOREF
:
361 strbuf_addf(sb
, "%s/%s", refs
->gitdir
, refname
);
363 case REF_TYPE_NORMAL
:
364 strbuf_addf(sb
, "%s/%s", refs
->gitcommondir
, refname
);
367 die("BUG: unknown ref type %d of ref %s",
368 ref_type(refname
), refname
);
373 * Get the packed_ref_cache for the specified files_ref_store,
374 * creating and populating it if it hasn't been read before or if the
375 * file has been changed (according to its `validity` field) since it
376 * was last read. On the other hand, if we hold the lock, then assume
377 * that the file hasn't been changed out from under us, so skip the
378 * extra `stat()` call in `stat_validity_check()`.
380 static struct packed_ref_cache
*get_packed_ref_cache(struct files_ref_store
*refs
)
382 const char *packed_refs_file
= files_packed_refs_path(refs
);
385 !is_lock_file_locked(&refs
->packed_refs_lock
) &&
386 !stat_validity_check(&refs
->packed
->validity
, packed_refs_file
))
387 clear_packed_ref_cache(refs
);
390 refs
->packed
= read_packed_refs(packed_refs_file
);
395 static struct ref_dir
*get_packed_ref_dir(struct packed_ref_cache
*packed_ref_cache
)
397 return get_ref_dir(packed_ref_cache
->cache
->root
);
/*
 * Convenience wrapper: look up (populating if necessary) the packed-ref
 * cache for `refs` and return its root ref_dir.
 */
static struct ref_dir *get_packed_refs(struct files_ref_store *refs)
{
	struct packed_ref_cache *packed = get_packed_ref_cache(refs);

	return get_packed_ref_dir(packed);
}
406 * Add a reference to the in-memory packed reference cache. This may
407 * only be called while the packed-refs file is locked (see
408 * lock_packed_refs()). To actually write the packed-refs file, call
409 * commit_packed_refs().
411 static void add_packed_ref(struct files_ref_store
*refs
,
412 const char *refname
, const struct object_id
*oid
)
414 struct packed_ref_cache
*packed_ref_cache
= get_packed_ref_cache(refs
);
416 if (!is_lock_file_locked(&refs
->packed_refs_lock
))
417 die("BUG: packed refs not locked");
418 add_ref_entry(get_packed_ref_dir(packed_ref_cache
),
419 create_ref_entry(refname
, oid
, REF_ISPACKED
, 1));
423 * Read the loose references from the namespace dirname into dir
424 * (without recursing). dirname must end with '/'. dir must be the
425 * directory entry corresponding to dirname.
427 static void loose_fill_ref_dir(struct ref_store
*ref_store
,
428 struct ref_dir
*dir
, const char *dirname
)
430 struct files_ref_store
*refs
=
431 files_downcast(ref_store
, REF_STORE_READ
, "fill_ref_dir");
434 int dirnamelen
= strlen(dirname
);
435 struct strbuf refname
;
436 struct strbuf path
= STRBUF_INIT
;
439 files_ref_path(refs
, &path
, dirname
);
440 path_baselen
= path
.len
;
442 d
= opendir(path
.buf
);
444 strbuf_release(&path
);
448 strbuf_init(&refname
, dirnamelen
+ 257);
449 strbuf_add(&refname
, dirname
, dirnamelen
);
451 while ((de
= readdir(d
)) != NULL
) {
452 struct object_id oid
;
456 if (de
->d_name
[0] == '.')
458 if (ends_with(de
->d_name
, ".lock"))
460 strbuf_addstr(&refname
, de
->d_name
);
461 strbuf_addstr(&path
, de
->d_name
);
462 if (stat(path
.buf
, &st
) < 0) {
463 ; /* silently ignore */
464 } else if (S_ISDIR(st
.st_mode
)) {
465 strbuf_addch(&refname
, '/');
466 add_entry_to_dir(dir
,
467 create_dir_entry(dir
->cache
, refname
.buf
,
470 if (!refs_resolve_ref_unsafe(&refs
->base
,
475 flag
|= REF_ISBROKEN
;
476 } else if (is_null_oid(&oid
)) {
478 * It is so astronomically unlikely
479 * that NULL_SHA1 is the SHA-1 of an
480 * actual object that we consider its
481 * appearance in a loose reference
482 * file to be repo corruption
483 * (probably due to a software bug).
485 flag
|= REF_ISBROKEN
;
488 if (check_refname_format(refname
.buf
,
489 REFNAME_ALLOW_ONELEVEL
)) {
490 if (!refname_is_safe(refname
.buf
))
491 die("loose refname is dangerous: %s", refname
.buf
);
493 flag
|= REF_BAD_NAME
| REF_ISBROKEN
;
495 add_entry_to_dir(dir
,
496 create_ref_entry(refname
.buf
, &oid
, flag
, 0));
498 strbuf_setlen(&refname
, dirnamelen
);
499 strbuf_setlen(&path
, path_baselen
);
501 strbuf_release(&refname
);
502 strbuf_release(&path
);
506 * Manually add refs/bisect, which, being per-worktree, might
507 * not appear in the directory listing for refs/ in the main
510 if (!strcmp(dirname
, "refs/")) {
511 int pos
= search_ref_dir(dir
, "refs/bisect/", 12);
514 struct ref_entry
*child_entry
= create_dir_entry(
515 dir
->cache
, "refs/bisect/", 12, 1);
516 add_entry_to_dir(dir
, child_entry
);
521 static struct ref_cache
*get_loose_ref_cache(struct files_ref_store
*refs
)
525 * Mark the top-level directory complete because we
526 * are about to read the only subdirectory that can
529 refs
->loose
= create_ref_cache(&refs
->base
, loose_fill_ref_dir
);
531 /* We're going to fill the top level ourselves: */
532 refs
->loose
->root
->flag
&= ~REF_INCOMPLETE
;
535 * Add an incomplete entry for "refs/" (to be filled
538 add_entry_to_dir(get_ref_dir(refs
->loose
->root
),
539 create_dir_entry(refs
->loose
, "refs/", 5, 1));
545 * Return the ref_entry for the given refname from the packed
546 * references. If it does not exist, return NULL.
/*
 * NOTE(review): fragmentary extraction — the second parameter line of
 * this signature is missing; from the body it is presumably
 * `const char *refname`, but confirm against the full source.
 */
548 static struct ref_entry
*get_packed_ref(struct files_ref_store
*refs
,
551 	return find_ref_entry(get_packed_refs(refs
), refname
);
555 * A loose ref file doesn't exist; check for a packed ref.
557 static int resolve_packed_ref(struct files_ref_store
*refs
,
559 unsigned char *sha1
, unsigned int *flags
)
561 struct ref_entry
*entry
;
564 * The loose reference file does not exist; check for a packed
567 entry
= get_packed_ref(refs
, refname
);
569 hashcpy(sha1
, entry
->u
.value
.oid
.hash
);
570 *flags
|= REF_ISPACKED
;
573 /* refname is not a packed reference. */
577 static int files_read_raw_ref(struct ref_store
*ref_store
,
578 const char *refname
, unsigned char *sha1
,
579 struct strbuf
*referent
, unsigned int *type
)
581 struct files_ref_store
*refs
=
582 files_downcast(ref_store
, REF_STORE_READ
, "read_raw_ref");
583 struct strbuf sb_contents
= STRBUF_INIT
;
584 struct strbuf sb_path
= STRBUF_INIT
;
591 int remaining_retries
= 3;
594 strbuf_reset(&sb_path
);
596 files_ref_path(refs
, &sb_path
, refname
);
602 * We might have to loop back here to avoid a race
603 * condition: first we lstat() the file, then we try
604 * to read it as a link or as a file. But if somebody
605 * changes the type of the file (file <-> directory
606 * <-> symlink) between the lstat() and reading, then
607 * we don't want to report that as an error but rather
608 * try again starting with the lstat().
610 * We'll keep a count of the retries, though, just to avoid
611 * any confusing situation sending us into an infinite loop.
614 if (remaining_retries
-- <= 0)
617 if (lstat(path
, &st
) < 0) {
620 if (resolve_packed_ref(refs
, refname
, sha1
, type
)) {
628 /* Follow "normalized" - ie "refs/.." symlinks by hand */
629 if (S_ISLNK(st
.st_mode
)) {
630 strbuf_reset(&sb_contents
);
631 if (strbuf_readlink(&sb_contents
, path
, 0) < 0) {
632 if (errno
== ENOENT
|| errno
== EINVAL
)
633 /* inconsistent with lstat; retry */
638 if (starts_with(sb_contents
.buf
, "refs/") &&
639 !check_refname_format(sb_contents
.buf
, 0)) {
640 strbuf_swap(&sb_contents
, referent
);
641 *type
|= REF_ISSYMREF
;
646 * It doesn't look like a refname; fall through to just
647 * treating it like a non-symlink, and reading whatever it
652 /* Is it a directory? */
653 if (S_ISDIR(st
.st_mode
)) {
655 * Even though there is a directory where the loose
656 * ref is supposed to be, there could still be a
659 if (resolve_packed_ref(refs
, refname
, sha1
, type
)) {
668 * Anything else, just open it and try to use it as
671 fd
= open(path
, O_RDONLY
);
673 if (errno
== ENOENT
&& !S_ISLNK(st
.st_mode
))
674 /* inconsistent with lstat; retry */
679 strbuf_reset(&sb_contents
);
680 if (strbuf_read(&sb_contents
, fd
, 256) < 0) {
681 int save_errno
= errno
;
687 strbuf_rtrim(&sb_contents
);
688 buf
= sb_contents
.buf
;
689 if (starts_with(buf
, "ref:")) {
691 while (isspace(*buf
))
694 strbuf_reset(referent
);
695 strbuf_addstr(referent
, buf
);
696 *type
|= REF_ISSYMREF
;
702 * Please note that FETCH_HEAD has additional
703 * data after the sha.
705 if (get_sha1_hex(buf
, sha1
) ||
706 (buf
[40] != '\0' && !isspace(buf
[40]))) {
707 *type
|= REF_ISBROKEN
;
716 strbuf_release(&sb_path
);
717 strbuf_release(&sb_contents
);
722 static void unlock_ref(struct ref_lock
*lock
)
724 /* Do not free lock->lk -- atexit() still looks at them */
726 rollback_lock_file(lock
->lk
);
727 free(lock
->ref_name
);
732 * Lock refname, without following symrefs, and set *lock_p to point
733 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
734 * and type similarly to read_raw_ref().
736 * The caller must verify that refname is a "safe" reference name (in
737 * the sense of refname_is_safe()) before calling this function.
739 * If the reference doesn't already exist, verify that refname doesn't
740 * have a D/F conflict with any existing references. extras and skip
741 * are passed to refs_verify_refname_available() for this check.
743 * If mustexist is not set and the reference is not found or is
744 * broken, lock the reference anyway but clear sha1.
746 * Return 0 on success. On failure, write an error message to err and
747 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
749 * Implementation note: This function is basically
754 * but it includes a lot more code to
755 * - Deal with possible races with other processes
756 * - Avoid calling refs_verify_refname_available() when it can be
757 * avoided, namely if we were successfully able to read the ref
758 * - Generate informative error messages in the case of failure
760 static int lock_raw_ref(struct files_ref_store
*refs
,
761 const char *refname
, int mustexist
,
762 const struct string_list
*extras
,
763 const struct string_list
*skip
,
764 struct ref_lock
**lock_p
,
765 struct strbuf
*referent
,
769 struct ref_lock
*lock
;
770 struct strbuf ref_file
= STRBUF_INIT
;
771 int attempts_remaining
= 3;
772 int ret
= TRANSACTION_GENERIC_ERROR
;
775 files_assert_main_repository(refs
, "lock_raw_ref");
779 /* First lock the file so it can't change out from under us. */
781 *lock_p
= lock
= xcalloc(1, sizeof(*lock
));
783 lock
->ref_name
= xstrdup(refname
);
784 files_ref_path(refs
, &ref_file
, refname
);
787 switch (safe_create_leading_directories(ref_file
.buf
)) {
792 * Suppose refname is "refs/foo/bar". We just failed
793 * to create the containing directory, "refs/foo",
794 * because there was a non-directory in the way. This
795 * indicates a D/F conflict, probably because of
796 * another reference such as "refs/foo". There is no
797 * reason to expect this error to be transitory.
799 if (refs_verify_refname_available(&refs
->base
, refname
,
800 extras
, skip
, err
)) {
803 * To the user the relevant error is
804 * that the "mustexist" reference is
808 strbuf_addf(err
, "unable to resolve reference '%s'",
812 * The error message set by
813 * refs_verify_refname_available() is
816 ret
= TRANSACTION_NAME_CONFLICT
;
820 * The file that is in the way isn't a loose
821 * reference. Report it as a low-level
824 strbuf_addf(err
, "unable to create lock file %s.lock; "
825 "non-directory in the way",
830 /* Maybe another process was tidying up. Try again. */
831 if (--attempts_remaining
> 0)
835 strbuf_addf(err
, "unable to create directory for %s",
841 lock
->lk
= xcalloc(1, sizeof(struct lock_file
));
843 if (hold_lock_file_for_update(lock
->lk
, ref_file
.buf
, LOCK_NO_DEREF
) < 0) {
844 if (errno
== ENOENT
&& --attempts_remaining
> 0) {
846 * Maybe somebody just deleted one of the
847 * directories leading to ref_file. Try
852 unable_to_lock_message(ref_file
.buf
, errno
, err
);
858 * Now we hold the lock and can read the reference without
859 * fear that its value will change.
862 if (files_read_raw_ref(&refs
->base
, refname
,
863 lock
->old_oid
.hash
, referent
, type
)) {
864 if (errno
== ENOENT
) {
866 /* Garden variety missing reference. */
867 strbuf_addf(err
, "unable to resolve reference '%s'",
872 * Reference is missing, but that's OK. We
873 * know that there is not a conflict with
874 * another loose reference because
875 * (supposing that we are trying to lock
876 * reference "refs/foo/bar"):
878 * - We were successfully able to create
879 * the lockfile refs/foo/bar.lock, so we
880 * know there cannot be a loose reference
883 * - We got ENOENT and not EISDIR, so we
884 * know that there cannot be a loose
885 * reference named "refs/foo/bar/baz".
888 } else if (errno
== EISDIR
) {
890 * There is a directory in the way. It might have
891 * contained references that have been deleted. If
892 * we don't require that the reference already
893 * exists, try to remove the directory so that it
894 * doesn't cause trouble when we want to rename the
895 * lockfile into place later.
898 /* Garden variety missing reference. */
899 strbuf_addf(err
, "unable to resolve reference '%s'",
902 } else if (remove_dir_recursively(&ref_file
,
903 REMOVE_DIR_EMPTY_ONLY
)) {
904 if (refs_verify_refname_available(
905 &refs
->base
, refname
,
906 extras
, skip
, err
)) {
908 * The error message set by
909 * verify_refname_available() is OK.
911 ret
= TRANSACTION_NAME_CONFLICT
;
915 * We can't delete the directory,
916 * but we also don't know of any
917 * references that it should
920 strbuf_addf(err
, "there is a non-empty directory '%s' "
921 "blocking reference '%s'",
922 ref_file
.buf
, refname
);
926 } else if (errno
== EINVAL
&& (*type
& REF_ISBROKEN
)) {
927 strbuf_addf(err
, "unable to resolve reference '%s': "
928 "reference broken", refname
);
931 strbuf_addf(err
, "unable to resolve reference '%s': %s",
932 refname
, strerror(errno
));
937 * If the ref did not exist and we are creating it,
938 * make sure there is no existing ref that conflicts
941 if (refs_verify_refname_available(
942 &refs
->base
, refname
,
955 strbuf_release(&ref_file
);
959 static int files_peel_ref(struct ref_store
*ref_store
,
960 const char *refname
, unsigned char *sha1
)
962 struct files_ref_store
*refs
=
963 files_downcast(ref_store
, REF_STORE_READ
| REF_STORE_ODB
,
966 unsigned char base
[20];
968 if (current_ref_iter
&& current_ref_iter
->refname
== refname
) {
969 struct object_id peeled
;
971 if (ref_iterator_peel(current_ref_iter
, &peeled
))
973 hashcpy(sha1
, peeled
.hash
);
977 if (refs_read_ref_full(ref_store
, refname
,
978 RESOLVE_REF_READING
, base
, &flag
))
982 * If the reference is packed, read its ref_entry from the
983 * cache in the hope that we already know its peeled value.
984 * We only try this optimization on packed references because
985 * (a) forcing the filling of the loose reference cache could
986 * be expensive and (b) loose references anyway usually do not
987 * have REF_KNOWS_PEELED.
989 if (flag
& REF_ISPACKED
) {
990 struct ref_entry
*r
= get_packed_ref(refs
, refname
);
992 if (peel_entry(r
, 0))
994 hashcpy(sha1
, r
->u
.value
.peeled
.hash
);
999 return peel_object(base
, sha1
);
1002 struct files_ref_iterator
{
1003 struct ref_iterator base
;
1005 struct packed_ref_cache
*packed_ref_cache
;
1006 struct ref_iterator
*iter0
;
1010 static int files_ref_iterator_advance(struct ref_iterator
*ref_iterator
)
1012 struct files_ref_iterator
*iter
=
1013 (struct files_ref_iterator
*)ref_iterator
;
1016 while ((ok
= ref_iterator_advance(iter
->iter0
)) == ITER_OK
) {
1017 if (iter
->flags
& DO_FOR_EACH_PER_WORKTREE_ONLY
&&
1018 ref_type(iter
->iter0
->refname
) != REF_TYPE_PER_WORKTREE
)
1021 if (!(iter
->flags
& DO_FOR_EACH_INCLUDE_BROKEN
) &&
1022 !ref_resolves_to_object(iter
->iter0
->refname
,
1024 iter
->iter0
->flags
))
1027 iter
->base
.refname
= iter
->iter0
->refname
;
1028 iter
->base
.oid
= iter
->iter0
->oid
;
1029 iter
->base
.flags
= iter
->iter0
->flags
;
1034 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
1040 static int files_ref_iterator_peel(struct ref_iterator
*ref_iterator
,
1041 struct object_id
*peeled
)
1043 struct files_ref_iterator
*iter
=
1044 (struct files_ref_iterator
*)ref_iterator
;
1046 return ref_iterator_peel(iter
->iter0
, peeled
);
1049 static int files_ref_iterator_abort(struct ref_iterator
*ref_iterator
)
1051 struct files_ref_iterator
*iter
=
1052 (struct files_ref_iterator
*)ref_iterator
;
1056 ok
= ref_iterator_abort(iter
->iter0
);
1058 release_packed_ref_cache(iter
->packed_ref_cache
);
1059 base_ref_iterator_free(ref_iterator
);
1063 static struct ref_iterator_vtable files_ref_iterator_vtable
= {
1064 files_ref_iterator_advance
,
1065 files_ref_iterator_peel
,
1066 files_ref_iterator_abort
1069 static struct ref_iterator
*files_ref_iterator_begin(
1070 struct ref_store
*ref_store
,
1071 const char *prefix
, unsigned int flags
)
1073 struct files_ref_store
*refs
;
1074 struct ref_iterator
*loose_iter
, *packed_iter
;
1075 struct files_ref_iterator
*iter
;
1076 struct ref_iterator
*ref_iterator
;
1077 unsigned int required_flags
= REF_STORE_READ
;
1079 if (!(flags
& DO_FOR_EACH_INCLUDE_BROKEN
))
1080 required_flags
|= REF_STORE_ODB
;
1082 refs
= files_downcast(ref_store
, required_flags
, "ref_iterator_begin");
1084 iter
= xcalloc(1, sizeof(*iter
));
1085 ref_iterator
= &iter
->base
;
1086 base_ref_iterator_init(ref_iterator
, &files_ref_iterator_vtable
);
1089 * We must make sure that all loose refs are read before
1090 * accessing the packed-refs file; this avoids a race
1091 * condition if loose refs are migrated to the packed-refs
1092 * file by a simultaneous process, but our in-memory view is
1093 * from before the migration. We ensure this as follows:
1094 * First, we call start the loose refs iteration with its
1095 * `prime_ref` argument set to true. This causes the loose
1096 * references in the subtree to be pre-read into the cache.
1097 * (If they've already been read, that's OK; we only need to
1098 * guarantee that they're read before the packed refs, not
1099 * *how much* before.) After that, we call
1100 * get_packed_ref_cache(), which internally checks whether the
1101 * packed-ref cache is up to date with what is on disk, and
1102 * re-reads it if not.
1105 loose_iter
= cache_ref_iterator_begin(get_loose_ref_cache(refs
),
1108 iter
->packed_ref_cache
= get_packed_ref_cache(refs
);
1109 acquire_packed_ref_cache(iter
->packed_ref_cache
);
1110 packed_iter
= cache_ref_iterator_begin(iter
->packed_ref_cache
->cache
,
1113 iter
->iter0
= overlay_ref_iterator_begin(loose_iter
, packed_iter
);
1114 iter
->flags
= flags
;
1116 return ref_iterator
;
1120 * Verify that the reference locked by lock has the value old_sha1.
1121 * Fail if the reference doesn't exist and mustexist is set. Return 0
1122 * on success. On error, write an error message to err, set errno, and
1123 * return a negative value.
1125 static int verify_lock(struct ref_store
*ref_store
, struct ref_lock
*lock
,
1126 const unsigned char *old_sha1
, int mustexist
,
1131 if (refs_read_ref_full(ref_store
, lock
->ref_name
,
1132 mustexist
? RESOLVE_REF_READING
: 0,
1133 lock
->old_oid
.hash
, NULL
)) {
1135 int save_errno
= errno
;
1136 strbuf_addf(err
, "can't verify ref '%s'", lock
->ref_name
);
1140 oidclr(&lock
->old_oid
);
1144 if (old_sha1
&& hashcmp(lock
->old_oid
.hash
, old_sha1
)) {
1145 strbuf_addf(err
, "ref '%s' is at %s but expected %s",
1147 oid_to_hex(&lock
->old_oid
),
1148 sha1_to_hex(old_sha1
));
1155 static int remove_empty_directories(struct strbuf
*path
)
1158 * we want to create a file but there is a directory there;
1159 * if that is an empty directory (or a directory that contains
1160 * only empty directories), remove them.
1162 return remove_dir_recursively(path
, REMOVE_DIR_EMPTY_ONLY
);
1165 static int create_reflock(const char *path
, void *cb
)
1167 struct lock_file
*lk
= cb
;
1169 return hold_lock_file_for_update(lk
, path
, LOCK_NO_DEREF
) < 0 ? -1 : 0;
1173 * Locks a ref returning the lock on success and NULL on failure.
1174 * On failure errno is set to something meaningful.
1176 static struct ref_lock
*lock_ref_sha1_basic(struct files_ref_store
*refs
,
1177 const char *refname
,
1178 const unsigned char *old_sha1
,
1179 const struct string_list
*extras
,
1180 const struct string_list
*skip
,
1181 unsigned int flags
, int *type
,
1184 struct strbuf ref_file
= STRBUF_INIT
;
1185 struct ref_lock
*lock
;
1187 int mustexist
= (old_sha1
&& !is_null_sha1(old_sha1
));
1188 int resolve_flags
= RESOLVE_REF_NO_RECURSE
;
1191 files_assert_main_repository(refs
, "lock_ref_sha1_basic");
1194 lock
= xcalloc(1, sizeof(struct ref_lock
));
1197 resolve_flags
|= RESOLVE_REF_READING
;
1198 if (flags
& REF_DELETING
)
1199 resolve_flags
|= RESOLVE_REF_ALLOW_BAD_NAME
;
1201 files_ref_path(refs
, &ref_file
, refname
);
1202 resolved
= !!refs_resolve_ref_unsafe(&refs
->base
,
1203 refname
, resolve_flags
,
1204 lock
->old_oid
.hash
, type
);
1205 if (!resolved
&& errno
== EISDIR
) {
1207 * we are trying to lock foo but we used to
1208 * have foo/bar which now does not exist;
1209 * it is normal for the empty directory 'foo'
1212 if (remove_empty_directories(&ref_file
)) {
1214 if (!refs_verify_refname_available(
1216 refname
, extras
, skip
, err
))
1217 strbuf_addf(err
, "there are still refs under '%s'",
1221 resolved
= !!refs_resolve_ref_unsafe(&refs
->base
,
1222 refname
, resolve_flags
,
1223 lock
->old_oid
.hash
, type
);
1227 if (last_errno
!= ENOTDIR
||
1228 !refs_verify_refname_available(&refs
->base
, refname
,
1230 strbuf_addf(err
, "unable to resolve reference '%s': %s",
1231 refname
, strerror(last_errno
));
1237 * If the ref did not exist and we are creating it, make sure
1238 * there is no existing packed ref whose name begins with our
1239 * refname, nor a packed ref whose name is a proper prefix of
1242 if (is_null_oid(&lock
->old_oid
) &&
1243 refs_verify_refname_available(&refs
->base
, refname
,
1244 extras
, skip
, err
)) {
1245 last_errno
= ENOTDIR
;
1249 lock
->lk
= xcalloc(1, sizeof(struct lock_file
));
1251 lock
->ref_name
= xstrdup(refname
);
1253 if (raceproof_create_file(ref_file
.buf
, create_reflock
, lock
->lk
)) {
1255 unable_to_lock_message(ref_file
.buf
, errno
, err
);
1259 if (verify_lock(&refs
->base
, lock
, old_sha1
, mustexist
, err
)) {
1270 strbuf_release(&ref_file
);
1276 * Write an entry to the packed-refs file for the specified refname.
1277 * If peeled is non-NULL, write it as the entry's peeled value.
/*
 * NOTE(review): fragmentary extraction — per the header comment's
 * contract, the second fprintf_or_die() below should be guarded by an
 * `if (peeled)` line that is missing from this view; confirm against
 * the full source.
 */
1279 static void write_packed_entry(FILE *fh
, const char *refname
,
1280 const unsigned char *sha1
,
1281 const unsigned char *peeled
)
1283 	fprintf_or_die(fh
, "%s %s\n", sha1_to_hex(sha1
), refname
);
1285 	fprintf_or_die(fh
, "^%s\n", sha1_to_hex(peeled
));
/*
 * Lock the packed-refs file for writing. Flags is passed to
 * hold_lock_file_for_update(). Return 0 on success. On errors, set
 * errno appropriately and return a nonzero value.
 */
static int lock_packed_refs(struct files_ref_store *refs, int flags)
{
	/*
	 * The lock timeout is read from config once per process and
	 * cached in these function-scope statics.
	 */
	static int timeout_configured = 0;
	static int timeout_value = 1000;
	struct packed_ref_cache *packed_ref_cache;

	files_assert_main_repository(refs, "lock_packed_refs");

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	if (hold_lock_file_for_update_timeout(
			    &refs->packed_refs_lock, files_packed_refs_path(refs),
			    flags, timeout_value) < 0)
		return -1;

	/*
	 * Get the current packed-refs while holding the lock. It is
	 * important that we call `get_packed_ref_cache()` before
	 * setting `packed_ref_cache->lock`, because otherwise the
	 * former will see that the file is locked and assume that the
	 * cache can't be stale.
	 */
	packed_ref_cache = get_packed_ref_cache(refs);
	/* Increment the reference count to prevent it from being freed: */
	acquire_packed_ref_cache(packed_ref_cache);
	return 0;
}
/*
 * Write the current version of the packed refs cache from memory to
 * disk. The packed-refs file must already be locked for writing (see
 * lock_packed_refs()). Return zero on success. On errors, set errno
 * and return a nonzero value
 */
static int commit_packed_refs(struct files_ref_store *refs)
{
	struct packed_ref_cache *packed_ref_cache =
		get_packed_ref_cache(refs);
	int ok, error = 0;
	int save_errno = 0;
	FILE *out;
	struct ref_iterator *iter;

	files_assert_main_repository(refs, "commit_packed_refs");

	if (!is_lock_file_locked(&refs->packed_refs_lock))
		die("BUG: packed-refs not locked");

	out = fdopen_lock_file(&refs->packed_refs_lock, "w");
	if (!out)
		die_errno("unable to fdopen packed-refs descriptor");

	fprintf_or_die(out, "%s", PACKED_REFS_HEADER);

	/* Dump every entry of the in-memory packed-refs cache. */
	iter = cache_ref_iterator_begin(packed_ref_cache->cache, NULL, 0);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct object_id peeled;
		/* Non-zero peel_error means "no peeled value to record". */
		int peel_error = ref_iterator_peel(iter, &peeled);

		write_packed_entry(out, iter->refname, iter->oid->hash,
				   peel_error ? NULL : peeled.hash);
	}

	if (ok != ITER_DONE)
		die("error while iterating over references");

	if (commit_lock_file(&refs->packed_refs_lock)) {
		/* Preserve errno across the cache release below. */
		save_errno = errno;
		error = -1;
	}
	release_packed_ref_cache(packed_ref_cache);
	errno = save_errno;
	return error;
}
/*
 * Rollback the lockfile for the packed-refs file, and discard the
 * in-memory packed reference cache. (The packed-refs file will be
 * read anew if it is needed again after this function is called.)
 */
static void rollback_packed_refs(struct files_ref_store *refs)
{
	struct packed_ref_cache *packed_ref_cache =
		get_packed_ref_cache(refs);

	files_assert_main_repository(refs, "rollback_packed_refs");

	if (!is_lock_file_locked(&refs->packed_refs_lock))
		die("BUG: packed-refs not locked");
	rollback_lock_file(&refs->packed_refs_lock);
	/* Drop the reference taken by lock_packed_refs(). */
	release_packed_ref_cache(packed_ref_cache);
	/* Force a re-read of packed-refs on next access. */
	clear_packed_ref_cache(refs);
}
/*
 * A linked-list node recording a loose reference that should be
 * deleted after its value has been written to packed-refs.
 * Allocated with FLEX_ALLOC_STR (name is a flexible array member).
 */
struct ref_to_prune {
	struct ref_to_prune *next;
	unsigned char sha1[20];	/* expected current value of the ref */
	char name[FLEX_ARRAY];	/* full refname, NUL-terminated */
};

/* Bit flags for try_remove_empty_parents(): */
enum {
	REMOVE_EMPTY_PARENTS_REF = 0x01,	/* prune under $GIT_DIR/refs/ */
	REMOVE_EMPTY_PARENTS_REFLOG = 0x02	/* prune under logs/refs/ */
};
/*
 * Remove empty parent directories associated with the specified
 * reference and/or its reflog, but spare [logs/]refs/ and immediate
 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
 * REMOVE_EMPTY_PARENTS_REFLOG.
 *
 * Works purely on the refname string: repeatedly strips the last
 * path component and rmdir()s the corresponding directory until
 * rmdir fails (directory not empty) for both namespaces.
 */
static void try_remove_empty_parents(struct files_ref_store *refs,
				     const char *refname,
				     unsigned int flags)
{
	struct strbuf buf = STRBUF_INIT;
	struct strbuf sb = STRBUF_INIT;
	char *p, *q;
	int i;

	strbuf_addstr(&buf, refname);
	p = buf.buf;
	/* Skip past the first two components ("refs/" + category). */
	for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
		while (*p && *p != '/')
			p++;
		/* tolerate duplicate slashes; see check_refname_format() */
		while (*p == '/')
			p++;
	}
	q = buf.buf + buf.len;
	/* Keep chopping trailing components while either flag is set. */
	while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
		while (q > p && *q != '/')
			q--;
		while (q > p && *(q-1) == '/')
			q--;
		if (q == p)
			break;
		strbuf_setlen(&buf, q - buf.buf);

		strbuf_reset(&sb);
		files_ref_path(refs, &sb, buf.buf);
		/* rmdir failure => directory not empty; stop that namespace. */
		if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
			flags &= ~REMOVE_EMPTY_PARENTS_REF;

		strbuf_reset(&sb);
		files_reflog_path(refs, &sb, buf.buf);
		if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
			flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
	}
	strbuf_release(&buf);
	strbuf_release(&sb);
}
/* make sure nobody touched the ref, and unlink */
static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
{
	struct ref_transaction *transaction;
	struct strbuf err = STRBUF_INIT;

	/* Refuse to touch refs with a malformed name. */
	if (check_refname_format(r->name, 0))
		return;

	/*
	 * Delete via a transaction with r->sha1 as the expected old
	 * value, so a concurrently-updated ref is left alone.
	 */
	transaction = ref_store_transaction_begin(&refs->base, &err);
	if (!transaction ||
	    ref_transaction_delete(transaction, r->name, r->sha1,
				   REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
	    ref_transaction_commit(transaction, &err)) {
		ref_transaction_free(transaction);
		error("%s", err.buf);
		strbuf_release(&err);
		return;
	}
	ref_transaction_free(transaction);
	strbuf_release(&err);
}
1471 static void prune_refs(struct files_ref_store
*refs
, struct ref_to_prune
*r
)
/*
 * Return true if the specified reference should be packed.
 *
 * ref_flags are the REF_* flags of the loose ref; pack_flags are the
 * PACK_REFS_* options passed to files_pack_refs().
 */
static int should_pack_ref(const char *refname,
			   const struct object_id *oid, unsigned int ref_flags,
			   unsigned int pack_flags)
{
	/* Do not pack per-worktree refs: */
	if (ref_type(refname) != REF_TYPE_NORMAL)
		return 0;

	/* Do not pack non-tags unless PACK_REFS_ALL is set: */
	if (!(pack_flags & PACK_REFS_ALL) && !starts_with(refname, "refs/tags/"))
		return 0;

	/* Do not pack symbolic refs: */
	if (ref_flags & REF_ISSYMREF)
		return 0;

	/* Do not pack broken refs: */
	if (!ref_resolves_to_object(refname, oid, ref_flags))
		return 0;

	return 1;
}
/*
 * ref_store "pack_refs" method: copy eligible loose refs into the
 * packed-refs file (held under lock for the whole operation) and,
 * if PACK_REFS_PRUNE is set, schedule the packed loose refs for
 * deletion.
 */
static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
			       "pack_refs");
	struct ref_iterator *iter;
	struct ref_dir *packed_refs;
	int ok;
	struct ref_to_prune *refs_to_prune = NULL;

	lock_packed_refs(refs, LOCK_DIE_ON_ERROR);
	packed_refs = get_packed_refs(refs);

	iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL, 0);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		/*
		 * If the loose reference can be packed, add an entry
		 * in the packed ref cache. If the reference should be
		 * pruned, also add it to refs_to_prune.
		 */
		struct ref_entry *packed_entry;

		if (!should_pack_ref(iter->refname, iter->oid, iter->flags,
				     flags))
			continue;

		/*
		 * Create an entry in the packed-refs cache equivalent
		 * to the one from the loose ref cache, except that
		 * we don't copy the peeled status, because we want it
		 * to be re-peeled.
		 */
		packed_entry = find_ref_entry(packed_refs, iter->refname);
		if (packed_entry) {
			/* Overwrite existing packed entry with info from loose entry */
			packed_entry->flag = REF_ISPACKED;
			oidcpy(&packed_entry->u.value.oid, iter->oid);
		} else {
			packed_entry = create_ref_entry(iter->refname, iter->oid,
							REF_ISPACKED, 0);
			add_ref_entry(packed_refs, packed_entry);
		}
		oidclr(&packed_entry->u.value.peeled);

		/* Schedule the loose reference for pruning if requested. */
		if ((flags & PACK_REFS_PRUNE)) {
			struct ref_to_prune *n;
			FLEX_ALLOC_STR(n, name, iter->refname);
			hashcpy(n->sha1, iter->oid->hash);
			n->next = refs_to_prune;
			refs_to_prune = n;
		}
	}
	if (ok != ITER_DONE)
		die("error while iterating over references");

	if (commit_packed_refs(refs))
		die_errno("unable to overwrite old ref-pack file");

	prune_refs(refs, refs_to_prune);
	return 0;
}
/*
 * Rewrite the packed-refs file, omitting any refs listed in
 * 'refnames'. On error, leave packed-refs unchanged, write an error
 * message to 'err', and return a nonzero value.
 *
 * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
 */
static int repack_without_refs(struct files_ref_store *refs,
			       struct string_list *refnames, struct strbuf *err)
{
	struct ref_dir *packed;
	struct string_list_item *refname;
	int ret, needs_repacking = 0, removed = 0;

	files_assert_main_repository(refs, "repack_without_refs");
	assert(err);

	/* Look for a packed ref */
	for_each_string_list_item(refname, refnames) {
		if (get_packed_ref(refs, refname->string)) {
			needs_repacking = 1;
			break;
		}
	}

	/* Avoid locking if we have nothing to do */
	if (!needs_repacking)
		return 0; /* no refname exists in packed refs */

	if (lock_packed_refs(refs, 0)) {
		unable_to_lock_message(files_packed_refs_path(refs), errno, err);
		return -1;
	}
	packed = get_packed_refs(refs);

	/* Remove refnames from the cache */
	for_each_string_list_item(refname, refnames)
		if (remove_entry_from_dir(packed, refname->string) != -1)
			removed = 1;
	if (!removed) {
		/*
		 * All packed entries disappeared while we were
		 * acquiring the lock.
		 */
		rollback_packed_refs(refs);
		return 0;
	}

	/* Write what remains */
	ret = commit_packed_refs(refs);
	if (ret)
		strbuf_addf(err, "unable to overwrite old ref-pack file: %s",
			    strerror(errno));
	return ret;
}
/*
 * ref_store "delete_refs" method: remove all refs in 'refnames',
 * first rewriting packed-refs without them, then deleting the loose
 * refs. If the packed-refs rewrite fails, the loose refs are left
 * untouched (see comment below). Returns 0 on success.
 */
static int files_delete_refs(struct ref_store *ref_store, const char *msg,
			     struct string_list *refnames, unsigned int flags)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	int i, result = 0;

	if (!refnames->nr)
		return 0;

	result = repack_without_refs(refs, refnames, &err);
	if (result) {
		/*
		 * If we failed to rewrite the packed-refs file, then
		 * it is unsafe to try to remove loose refs, because
		 * doing so might expose an obsolete packed value for
		 * a reference that might even point at an object that
		 * has been garbage collected.
		 */
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);

		goto out;
	}

	for (i = 0; i < refnames->nr; i++) {
		const char *refname = refnames->items[i].string;

		if (refs_delete_ref(&refs->base, msg, refname, NULL, flags))
			result |= error(_("could not remove reference %s"), refname);
	}

out:
	strbuf_release(&err);
	return result;
}
/*
 * People using contrib's git-new-workdir have .git/logs/refs ->
 * /some/other/path/.git/logs/refs, and that may live on another device.
 *
 * IOW, to avoid cross device rename errors, the temporary renamed log must
 * live into logs/refs.
 */
#define TMP_RENAMED_LOG  "refs/.tmp-renamed-log"

/* Callback state for rename_tmp_log_callback(). */
struct rename_cb {
	const char *tmp_renamed_log;	/* source path of the stashed reflog */
	int true_errno;			/* real errno from a failed rename() */
};
/*
 * raceproof_create_file() callback: move the stashed temporary reflog
 * to its final path. Returns 0 on success, -1 on failure (with errno
 * massaged so that raceproof_create_file() retries appropriately).
 */
static int rename_tmp_log_callback(const char *path, void *cb_data)
{
	struct rename_cb *cb = cb_data;

	if (rename(cb->tmp_renamed_log, path)) {
		/*
		 * rename(a, b) when b is an existing directory ought
		 * to result in ISDIR, but Solaris 5.8 gives ENOTDIR.
		 * Sheesh. Record the true errno for error reporting,
		 * but report EISDIR to raceproof_create_file() so
		 * that it knows to retry.
		 */
		cb->true_errno = errno;
		if (errno == ENOTDIR)
			errno = EISDIR;
		return -1;
	} else {
		return 0;
	}
}
/*
 * Move the reflog stashed at TMP_RENAMED_LOG to the reflog path of
 * newrefname, retrying around races via raceproof_create_file().
 * Returns 0 on success; on failure reports an error and returns the
 * nonzero result.
 */
static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
{
	struct strbuf path = STRBUF_INIT;
	struct strbuf tmp = STRBUF_INIT;
	struct rename_cb cb;
	int ret;

	files_reflog_path(refs, &path, newrefname);
	files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
	cb.tmp_renamed_log = tmp.buf;
	ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
	if (ret) {
		if (errno == EISDIR)
			error("directory not empty: %s", path.buf);
		else
			error("unable to move logfile %s to %s: %s",
			      tmp.buf, path.buf,
			      strerror(cb.true_errno));
	}

	strbuf_release(&path);
	strbuf_release(&tmp);
	return ret;
}
/*
 * Forward declarations: both functions are defined later in this file
 * but are needed by files_rename_ref() below.
 */
static int write_ref_to_lockfile(struct ref_lock *lock,
				 const struct object_id *oid, struct strbuf *err);
static int commit_ref_update(struct files_ref_store *refs,
			     struct ref_lock *lock,
			     const struct object_id *oid, const char *logmsg,
			     struct strbuf *err);
/*
 * ref_store "rename_ref" method: rename oldrefname to newrefname,
 * carrying the reflog along. The sequence is:
 *
 *   1. stash the old reflog away as TMP_RENAMED_LOG,
 *   2. delete the old ref and any existing ref at the new name,
 *   3. move the stashed reflog into place and write the new ref.
 *
 * On any failure after step 2 the function attempts to roll back:
 * re-create the old ref with its original value and restore the
 * reflog. Returns 0 on success, nonzero on failure.
 */
static int files_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
	struct object_id oid, orig_oid;
	int flag = 0, logmoved = 0;
	struct ref_lock *lock;
	struct stat loginfo;
	struct strbuf sb_oldref = STRBUF_INIT;
	struct strbuf sb_newref = STRBUF_INIT;
	struct strbuf tmp_renamed_log = STRBUF_INIT;
	int log, ret;
	struct strbuf err = STRBUF_INIT;

	files_reflog_path(refs, &sb_oldref, oldrefname);
	files_reflog_path(refs, &sb_newref, newrefname);
	files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);

	/* "log" records whether the old ref has a reflog to carry over. */
	log = !lstat(sb_oldref.buf, &loginfo);
	if (log && S_ISLNK(loginfo.st_mode)) {
		ret = error("reflog for %s is a symlink", oldrefname);
		goto out;
	}

	if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
				     RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
				     orig_oid.hash, &flag)) {
		ret = error("refname %s not found", oldrefname);
		goto out;
	}

	if (flag & REF_ISSYMREF) {
		ret = error("refname %s is a symbolic ref, renaming it is not supported",
			    oldrefname);
		goto out;
	}
	if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
		ret = 1;
		goto out;
	}

	/* Stash the reflog out of the way before deleting the old ref. */
	if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
		ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
			    oldrefname, strerror(errno));
		goto out;
	}

	if (refs_delete_ref(&refs->base, logmsg, oldrefname,
			    orig_oid.hash, REF_NODEREF)) {
		error("unable to delete old %s", oldrefname);
		goto rollback;
	}

	/*
	 * Since we are doing a shallow lookup, oid is not the
	 * correct value to pass to delete_ref as old_oid. But that
	 * doesn't matter, because an old_oid check wouldn't add to
	 * the safety anyway; we want to delete the reference whatever
	 * its current value.
	 */
	if (!refs_read_ref_full(&refs->base, newrefname,
				RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
				oid.hash, NULL) &&
	    refs_delete_ref(&refs->base, NULL, newrefname,
			    NULL, REF_NODEREF)) {
		if (errno == EISDIR) {
			struct strbuf path = STRBUF_INIT;
			int result;

			files_ref_path(refs, &path, newrefname);
			result = remove_empty_directories(&path);
			strbuf_release(&path);

			if (result) {
				error("Directory not empty: %s", newrefname);
				goto rollback;
			}
		} else {
			error("unable to delete existing %s", newrefname);
			goto rollback;
		}
	}

	if (log && rename_tmp_log(refs, newrefname))
		goto rollback;

	logmoved = log;

	lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
				   REF_NODEREF, NULL, &err);
	if (!lock) {
		error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
		strbuf_release(&err);
		goto rollback;
	}
	oidcpy(&lock->old_oid, &orig_oid);

	if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
	    commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
		error("unable to write current sha1 into %s: %s", newrefname, err.buf);
		strbuf_release(&err);
		goto rollback;
	}

	ret = 0;
	goto out;

 rollback:
	/* Try to restore the old ref to its original value. */
	lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL,
				   REF_NODEREF, NULL, &err);
	if (!lock) {
		error("unable to lock %s for rollback: %s", oldrefname, err.buf);
		strbuf_release(&err);
		goto rollbacklog;
	}

	/* Suppress reflog writing while rolling back. */
	flag = log_all_ref_updates;
	log_all_ref_updates = LOG_REFS_NONE;
	if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
	    commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
		error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
		strbuf_release(&err);
	}
	log_all_ref_updates = flag;

 rollbacklog:
	/* Put the reflog back wherever it ended up. */
	if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
		error("unable to restore logfile %s from %s: %s",
		      oldrefname, newrefname, strerror(errno));
	if (!logmoved && log &&
	    rename(tmp_renamed_log.buf, sb_oldref.buf))
		error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
		      oldrefname, strerror(errno));
	ret = 1;
 out:
	strbuf_release(&sb_newref);
	strbuf_release(&sb_oldref);
	strbuf_release(&tmp_renamed_log);

	return ret;
}
/*
 * Close the lockfile backing a held ref lock without committing it.
 * Returns 0 on success, -1 on failure.
 */
static int close_ref(struct ref_lock *lock)
{
	if (close_lock_file(lock->lk))
		return -1;
	return 0;
}
/*
 * Commit the lockfile backing a held ref lock, i.e. rename it into
 * place as the ref file. Returns 0 on success, -1 on failure.
 */
static int commit_ref(struct ref_lock *lock)
{
	char *path = get_locked_file_path(lock->lk);
	struct stat st;

	if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
		/*
		 * There is a directory at the path we want to rename
		 * the lockfile to. Hopefully it is empty; try to
		 * delete it.
		 */
		size_t len = strlen(path);
		struct strbuf sb_path = STRBUF_INIT;

		/* sb_path takes ownership of path; released below. */
		strbuf_attach(&sb_path, path, len, len);

		/*
		 * If this fails, commit_lock_file() will also fail
		 * and will report the problem.
		 */
		remove_empty_directories(&sb_path);
		strbuf_release(&sb_path);
	} else {
		free(path);
	}

	if (commit_lock_file(lock->lk))
		return -1;
	return 0;
}
/*
 * raceproof_create_file() callback: open (creating if necessary) the
 * reflog at path for appending. The resulting file descriptor is
 * stored through cb (an int *). Returns 0 on success, -1 on failure
 * (errno set by open()).
 */
static int open_or_create_logfile(const char *path, void *cb)
{
	int *out_fd = cb;

	*out_fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
	if (*out_fd < 0)
		return -1;
	return 0;
}
/*
 * Create a reflog for a ref. If force_create = 0, only create the
 * reflog for certain refs (those for which should_autocreate_reflog
 * returns non-zero). Otherwise, create it regardless of the reference
 * name. If the logfile already existed or was created, return 0 and
 * set *logfd to the file descriptor opened for appending to the file.
 * If no logfile exists and we decided not to create one, return 0 and
 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
 * return -1.
 */
static int log_ref_setup(struct files_ref_store *refs,
			 const char *refname, int force_create,
			 int *logfd, struct strbuf *err)
{
	struct strbuf logfile_sb = STRBUF_INIT;
	char *logfile;

	files_reflog_path(refs, &logfile_sb, refname);
	logfile = strbuf_detach(&logfile_sb, NULL);

	if (force_create || should_autocreate_reflog(refname)) {
		/* Create-if-missing path, retried around races. */
		if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
			if (errno == ENOENT)
				strbuf_addf(err, "unable to create directory for '%s': "
					    "%s", logfile, strerror(errno));
			else if (errno == EISDIR)
				strbuf_addf(err, "there are still logs under '%s'",
					    logfile);
			else
				strbuf_addf(err, "unable to append to '%s': %s",
					    logfile, strerror(errno));

			goto error;
		}
	} else {
		/* Open only if it already exists. */
		*logfd = open(logfile, O_APPEND | O_WRONLY, 0666);
		if (*logfd < 0) {
			if (errno == ENOENT || errno == EISDIR) {
				/*
				 * The logfile doesn't already exist,
				 * but that is not an error; it only
				 * means that we won't write log
				 * entries to it.
				 */
				;
			} else {
				strbuf_addf(err, "unable to append to '%s': %s",
					    logfile, strerror(errno));
				goto error;
			}
		}
	}

	if (*logfd >= 0)
		adjust_shared_perm(logfile);

	free(logfile);
	return 0;

error:
	free(logfile);
	return -1;
}
/*
 * ref_store "create_reflog" method: ensure a reflog exists for
 * refname (see log_ref_setup() for the force_create semantics).
 * Returns 0 on success, -1 on failure with *err filled in.
 */
static int files_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	int fd;

	if (log_ref_setup(refs, refname, force_create, &fd, err))
		return -1;

	/* We only needed the side effect; close the descriptor. */
	if (fd >= 0)
		close(fd);

	return 0;
}
/*
 * Append one reflog record ("old new committer\tmsg\n") to the open
 * file descriptor fd. msg may be NULL. Returns 0 on success, -1 on a
 * short or failed write.
 */
static int log_ref_write_fd(int fd, const struct object_id *old_oid,
			    const struct object_id *new_oid,
			    const char *committer, const char *msg)
{
	int msglen, written;
	unsigned maxlen, len;
	char *logrec;

	msglen = msg ? strlen(msg) : 0;
	/* 100 covers the two hex oids, separators and slack. */
	maxlen = strlen(committer) + msglen + 100;
	logrec = xmalloc(maxlen);
	len = xsnprintf(logrec, maxlen, "%s %s %s\n",
			oid_to_hex(old_oid),
			oid_to_hex(new_oid),
			committer);
	/* Splice the (sanitized) message in before the trailing LF. */
	if (msglen)
		len += copy_reflog_msg(logrec + len - 1, msg) - 1;

	written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
	free(logrec);
	if (written != len)
		return -1;

	return 0;
}
/*
 * Append a reflog entry for refname recording the old_oid -> new_oid
 * transition. A missing reflog is not an error unless
 * REF_FORCE_CREATE_REFLOG is in flags. Returns 0 on success; on
 * failure fills in *err and returns nonzero.
 */
static int files_log_ref_write(struct files_ref_store *refs,
			       const char *refname, const struct object_id *old_oid,
			       const struct object_id *new_oid, const char *msg,
			       int flags, struct strbuf *err)
{
	int logfd, result;

	if (log_all_ref_updates == LOG_REFS_UNSET)
		log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;

	result = log_ref_setup(refs, refname,
			       flags & REF_FORCE_CREATE_REFLOG,
			       &logfd, err);

	if (result)
		return result;

	/* logfd < 0 means "no reflog, and that's fine". */
	if (logfd < 0)
		return 0;
	result = log_ref_write_fd(logfd, old_oid, new_oid,
				  git_committer_info(0), msg);
	if (result) {
		struct strbuf sb = STRBUF_INIT;
		int save_errno = errno;

		files_reflog_path(refs, &sb, refname);
		strbuf_addf(err, "unable to append to '%s': %s",
			    sb.buf, strerror(save_errno));
		strbuf_release(&sb);
		close(logfd);
		return -1;
	}
	/* close() can surface a deferred write error; check it too. */
	if (close(logfd)) {
		struct strbuf sb = STRBUF_INIT;
		int save_errno = errno;

		files_reflog_path(refs, &sb, refname);
		strbuf_addf(err, "unable to append to '%s': %s",
			    sb.buf, strerror(save_errno));
		strbuf_release(&sb);
		return -1;
	}
	return 0;
}
/*
 * Write sha1 into the open lockfile, then close the lockfile. On
 * errors, rollback the lockfile, fill in *err and
 * return -1.
 */
static int write_ref_to_lockfile(struct ref_lock *lock,
				 const struct object_id *oid, struct strbuf *err)
{
	static char term = '\n';
	struct object *o;
	int fd;

	/* Refuse to write a ref pointing at a missing object. */
	o = parse_object(oid);
	if (!o) {
		strbuf_addf(err,
			    "trying to write ref '%s' with nonexistent object %s",
			    lock->ref_name, oid_to_hex(oid));
		unlock_ref(lock);
		return -1;
	}
	/* Branches (refs/heads/*) must point at commits. */
	if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
		strbuf_addf(err,
			    "trying to write non-commit object %s to branch '%s'",
			    oid_to_hex(oid), lock->ref_name);
		unlock_ref(lock);
		return -1;
	}
	fd = get_lock_file_fd(lock->lk);
	if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
	    write_in_full(fd, &term, 1) != 1 ||
	    close_ref(lock) < 0) {
		strbuf_addf(err,
			    "couldn't write '%s'", get_lock_file_path(lock->lk));
		unlock_ref(lock);
		return -1;
	}
	return 0;
}
/*
 * Commit a change to a loose reference that has already been written
 * to the loose reference lockfile. Also update the reflogs if
 * necessary, using the specified lockmsg (which can be NULL).
 *
 * Consumes the lock in all cases (it is released before returning).
 * Returns 0 on success; on failure fills in *err and returns -1.
 */
static int commit_ref_update(struct files_ref_store *refs,
			     struct ref_lock *lock,
			     const struct object_id *oid, const char *logmsg,
			     struct strbuf *err)
{
	files_assert_main_repository(refs, "commit_ref_update");

	clear_loose_ref_cache(refs);
	if (files_log_ref_write(refs, lock->ref_name,
				&lock->old_oid, oid,
				logmsg, 0, err)) {
		char *old_msg = strbuf_detach(err, NULL);
		strbuf_addf(err, "cannot update the ref '%s': %s",
			    lock->ref_name, old_msg);
		free(old_msg);
		unlock_ref(lock);
		return -1;
	}

	if (strcmp(lock->ref_name, "HEAD") != 0) {
		/*
		 * Special hack: If a branch is updated directly and HEAD
		 * points to it (may happen on the remote side of a push
		 * for example) then logically the HEAD reflog should be
		 * updated too.
		 * A generic solution implies reverse symref information,
		 * but finding all symrefs pointing to the given branch
		 * would be rather costly for this rare event (the direct
		 * update of a branch) to be worth it. So let's cheat and
		 * check with HEAD only which should cover 99% of all usage
		 * scenarios (even 100% of the default ones).
		 */
		struct object_id head_oid;
		int head_flag;
		const char *head_ref;

		head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
						   RESOLVE_REF_READING,
						   head_oid.hash, &head_flag);
		if (head_ref && (head_flag & REF_ISSYMREF) &&
		    !strcmp(head_ref, lock->ref_name)) {
			struct strbuf log_err = STRBUF_INIT;
			if (files_log_ref_write(refs, "HEAD",
						&lock->old_oid, oid,
						logmsg, 0, &log_err)) {
				/* Non-fatal: report and carry on. */
				error("%s", log_err.buf);
				strbuf_release(&log_err);
			}
		}
	}

	if (commit_ref(lock)) {
		strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
		unlock_ref(lock);
		return -1;
	}

	unlock_ref(lock);
	return 0;
}
/*
 * Try to represent the symref as an actual filesystem symlink at the
 * locked ref path. Returns 0 on success; nonzero means the caller
 * should fall back to a textual "ref: ..." symbolic ref. Always
 * returns -1 when built with NO_SYMLINK_HEAD.
 */
static int create_ref_symlink(struct ref_lock *lock, const char *target)
{
	int ret = -1;
#ifndef NO_SYMLINK_HEAD
	char *ref_path = get_locked_file_path(lock->lk);
	unlink(ref_path);
	ret = symlink(target, ref_path);
	free(ref_path);

	if (ret)
		fprintf(stderr, "no symlink - falling back to symbolic ref\n");
#endif
	return ret;
}
/*
 * Best-effort reflog update for a symref change: if logmsg is given
 * and the target resolves, append a reflog entry for refname.
 * Failures are reported but not propagated.
 */
static void update_symref_reflog(struct files_ref_store *refs,
				 struct ref_lock *lock, const char *refname,
				 const char *target, const char *logmsg)
{
	struct strbuf err = STRBUF_INIT;
	struct object_id new_oid;
	if (logmsg &&
	    !refs_read_ref_full(&refs->base, target,
				RESOLVE_REF_READING, new_oid.hash, NULL) &&
	    files_log_ref_write(refs, refname, &lock->old_oid,
				&new_oid, logmsg, 0, &err)) {
		error("%s", err.buf);
		strbuf_release(&err);
	}
}
/*
 * Write the symref with the lock already held: either as a real
 * symlink (if core.preferSymlinkRefs) or as a "ref: <target>" file
 * committed via the lockfile. Returns 0 on success, nonzero on error.
 */
static int create_symref_locked(struct files_ref_store *refs,
				struct ref_lock *lock, const char *refname,
				const char *target, const char *logmsg)
{
	if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
		update_symref_reflog(refs, lock, refname, target, logmsg);
		return 0;
	}

	if (!fdopen_lock_file(lock->lk, "w"))
		return error("unable to fdopen %s: %s",
			     lock->lk->tempfile.filename.buf, strerror(errno));

	update_symref_reflog(refs, lock, refname, target, logmsg);

	/* no error check; commit_ref will check ferror */
	fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
	if (commit_ref(lock) < 0)
		return error("unable to write symref for %s: %s", refname,
			     strerror(errno));
	return 0;
}
/*
 * ref_store "create_symref" method: point refname at target as a
 * symbolic ref. Returns 0 on success, -1 if the ref could not be
 * locked, or the result of create_symref_locked() otherwise.
 */
static int files_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
	struct strbuf err = STRBUF_INIT;
	struct ref_lock *lock;
	int ret;

	lock = lock_ref_sha1_basic(refs, refname, NULL,
				   NULL, NULL, REF_NODEREF, NULL,
				   &err);
	if (!lock) {
		error("%s", err.buf);
		strbuf_release(&err);
		return -1;
	}

	ret = create_symref_locked(refs, lock, refname, target, logmsg);
	strbuf_release(&err);
	return ret;
}
/*
 * ref_store "reflog_exists" method: true iff a regular file exists at
 * the reflog path for refname.
 */
static int files_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct strbuf sb = STRBUF_INIT;
	struct stat st;
	int ret;

	files_reflog_path(refs, &sb, refname);
	ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
	strbuf_release(&sb);
	return ret;
}
/*
 * ref_store "delete_reflog" method: remove the reflog file for
 * refname (and any now-empty leading directories, via remove_path()).
 */
static int files_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
	struct strbuf sb = STRBUF_INIT;
	int ret;

	files_reflog_path(refs, &sb, refname);
	ret = remove_path(sb.buf);
	strbuf_release(&sb);
	return ret;
}
/*
 * Parse one reflog line held in sb and, if well-formed, invoke fn
 * with the decoded fields. Malformed lines are silently skipped
 * (return 0). Note: the parse mutates sb's buffer in place
 * (NUL-terminating the identity after '>').
 */
static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
{
	struct object_id ooid, noid;
	char *email_end, *message;
	timestamp_t timestamp;
	int tz;
	const char *p = sb->buf;

	/* old SP new SP name <email> SP time TAB msg LF */
	if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
	    parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
	    parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
	    !(email_end = strchr(p, '>')) ||
	    email_end[1] != ' ' ||
	    !(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
	    !message || message[0] != ' ' ||
	    (message[1] != '+' && message[1] != '-') ||
	    !isdigit(message[2]) || !isdigit(message[3]) ||
	    !isdigit(message[4]) || !isdigit(message[5]))
		return 0; /* corrupt? */
	/* Terminate the identity ("name <email>") string in place. */
	email_end[1] = '\0';
	tz = strtol(message + 1, NULL, 10);
	/* Skip over the timezone; the message follows the TAB, if any. */
	if (message[6] != '\t')
		message += 6;
	else
		message += 7;
	return fn(&ooid, &noid, p, timestamp, tz, message, cb_data);
}
/*
 * Scan backwards from just before `scan` (exclusive) towards `bob`
 * ("beginning of buffer") looking for a newline.
 *
 * Return either beginning of the buffer, or LF at the end of
 * the previous line.
 */
static char *find_beginning_of_line(char *bob, char *scan)
{
	while (bob < scan) {
		scan--;
		if (*scan == '\n')
			break;	/* LF terminating the previous line */
	}
	return scan;
}
/*
 * ref_store "for_each_reflog_ent_reverse" method: iterate over the
 * reflog entries of refname from newest to oldest, calling fn for
 * each. The file is read backwards in fixed-size blocks; partial
 * lines spanning a block boundary are accumulated in sb until
 * complete. Stops early if fn returns nonzero.
 */
static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					     const char *refname,
					     each_reflog_ent_fn fn,
					     void *cb_data)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "for_each_reflog_ent_reverse");
	struct strbuf sb = STRBUF_INIT;
	FILE *logfp;
	long pos;
	int ret = 0, at_tail = 1;

	files_reflog_path(refs, &sb, refname);
	logfp = fopen(sb.buf, "r");
	strbuf_release(&sb);
	if (!logfp)
		return -1;

	/* Jump to the end */
	if (fseek(logfp, 0, SEEK_END) < 0)
		ret = error("cannot seek back reflog for %s: %s",
			    refname, strerror(errno));
	pos = ftell(logfp);
	while (!ret && 0 < pos) {
		int cnt;
		size_t nread;
		char buf[BUFSIZ];
		char *endp, *scanp;

		/* Fill next block from the end */
		cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
		if (fseek(logfp, pos - cnt, SEEK_SET)) {
			ret = error("cannot seek back reflog for %s: %s",
				    refname, strerror(errno));
			break;
		}
		nread = fread(buf, cnt, 1, logfp);
		if (nread != 1) {
			ret = error("cannot read %d bytes from reflog for %s: %s",
				    cnt, refname, strerror(errno));
			break;
		}
		pos -= cnt;

		scanp = endp = buf + cnt;
		if (at_tail && scanp[-1] == '\n')
			/* Looking at the final LF at the end of the file */
			scanp--;
		at_tail = 0;

		while (buf < scanp) {
			/*
			 * terminating LF of the previous line, or the beginning
			 * of the buffer.
			 */
			char *bp;

			bp = find_beginning_of_line(buf, scanp);

			if (*bp == '\n') {
				/*
				 * The newline is the end of the previous line,
				 * so we know we have complete line starting
				 * at (bp + 1). Prefix it onto any prior data
				 * we collected for the line and process it.
				 */
				strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
				scanp = bp;
				endp = bp + 1;
				ret = show_one_reflog_ent(&sb, fn, cb_data);
				strbuf_reset(&sb);
				if (ret)
					break;
			} else if (!pos) {
				/*
				 * We are at the start of the buffer, and the
				 * start of the file; there is no previous
				 * line, and we have everything for this one.
				 * Process it, and we can end the loop.
				 */
				strbuf_splice(&sb, 0, 0, buf, endp - buf);
				ret = show_one_reflog_ent(&sb, fn, cb_data);
				strbuf_reset(&sb);
				break;
			} else {
				/*
				 * We are at the start of the buffer, and there
				 * is more file to read backwards. Which means
				 * we are in the middle of a line. Note that we
				 * may get here even if *bp was a newline; that
				 * just means we are at the exact end of the
				 * previous line, rather than some spot in the
				 * middle.
				 *
				 * Save away what we have to be combined with
				 * the data from the next read.
				 */
				strbuf_splice(&sb, 0, 0, buf, endp - buf);
				break;
			}
		}
	}
	if (!ret && sb.len)
		die("BUG: reverse reflog parser had leftover data");

	fclose(logfp);
	strbuf_release(&sb);
	return ret;
}
/*
 * ref_store "for_each_reflog_ent" method: iterate over the reflog
 * entries of refname from oldest to newest, calling fn for each.
 * Stops early if fn returns nonzero.
 */
static int files_for_each_reflog_ent(struct ref_store *ref_store,
				     const char *refname,
				     each_reflog_ent_fn fn, void *cb_data)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "for_each_reflog_ent");
	FILE *logfp;
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;

	files_reflog_path(refs, &sb, refname);
	logfp = fopen(sb.buf, "r");
	strbuf_release(&sb);
	if (!logfp)
		return -1;

	while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
		ret = show_one_reflog_ent(&sb, fn, cb_data);
	fclose(logfp);
	strbuf_release(&sb);
	return ret;
}
/*
 * Iterator over all references that have reflogs, implemented by
 * walking the logs/ directory tree with a dir_iterator.
 */
struct files_reflog_iterator {
	struct ref_iterator base;	/* must be first (cast target) */

	struct ref_store *ref_store;	/* store used to resolve each ref */
	struct dir_iterator *dir_iterator;	/* walk over logs/refs/ */
	struct object_id oid;	/* backing storage for base.oid */
};
/*
 * ref_iterator "advance" method for the reflog iterator: step the
 * underlying directory walk to the next regular reflog file, skipping
 * dotfiles and *.lock files, and resolve the corresponding ref.
 */
static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct files_reflog_iterator *iter =
		(struct files_reflog_iterator *)ref_iterator;
	struct dir_iterator *diter = iter->dir_iterator;
	int ok;

	while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
		int flags;

		if (!S_ISREG(diter->st.st_mode))
			continue;
		if (diter->basename[0] == '.')
			continue;
		if (ends_with(diter->basename, ".lock"))
			continue;

		if (refs_read_ref_full(iter->ref_store,
				       diter->relative_path, 0,
				       iter->oid.hash, &flags)) {
			/* Report and skip unresolvable refs. */
			error("bad ref for %s", diter->path.buf);
			continue;
		}

		iter->base.refname = diter->relative_path;
		iter->base.oid = &iter->oid;
		iter->base.flags = flags;
		return ITER_OK;
	}

	/* Directory walk finished (or failed); tear ourselves down. */
	iter->dir_iterator = NULL;
	if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
		ok = ITER_ERROR;
	return ok;
}
/*
 * ref_iterator "peel" method: peeling makes no sense for a reflog
 * iterator, so any call is a programming error.
 */
static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
				      struct object_id *peeled)
{
	die("BUG: ref_iterator_peel() called for reflog_iterator");
}
/*
 * ref_iterator "abort" method: abort the underlying directory walk
 * (if still active) and free the iterator.
 */
static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct files_reflog_iterator *iter =
		(struct files_reflog_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->dir_iterator)
		ok = dir_iterator_abort(iter->dir_iterator);

	base_ref_iterator_free(ref_iterator);
	return ok;
}
= {
2528 files_reflog_iterator_advance
,
2529 files_reflog_iterator_peel
,
2530 files_reflog_iterator_abort
/*
 * ref_store "reflog_iterator_begin" method: construct an iterator
 * over all refs that have reflogs, rooted at the store's logs/
 * directory. Caller releases it via ref_iterator_abort()/advance to
 * completion.
 */
static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "reflog_iterator_begin");
	struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;
	struct strbuf sb = STRBUF_INIT;

	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
	files_reflog_path(refs, &sb, NULL);
	iter->dir_iterator = dir_iterator_begin(sb.buf);
	iter->ref_store = ref_store;
	strbuf_release(&sb);
	return ref_iterator;
}
/*
 * If update is a direct update of head_ref (the reference pointed to
 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
 *
 * Returns 0 on success (including "nothing to do"), or
 * TRANSACTION_NAME_CONFLICT with *err filled in if HEAD is already
 * part of the transaction.
 */
static int split_head_update(struct ref_update *update,
			     struct ref_transaction *transaction,
			     const char *head_ref,
			     struct string_list *affected_refnames,
			     struct strbuf *err)
{
	struct string_list_item *item;
	struct ref_update *new_update;

	/* Updates that are themselves derived or pruning need no split. */
	if ((update->flags & REF_LOG_ONLY) ||
	    (update->flags & REF_ISPRUNING) ||
	    (update->flags & REF_UPDATE_VIA_HEAD))
		return 0;

	if (strcmp(update->refname, head_ref))
		return 0;

	/*
	 * First make sure that HEAD is not already in the
	 * transaction. This insertion is O(N) in the transaction
	 * size, but it happens at most once per transaction.
	 */
	item = string_list_insert(affected_refnames, "HEAD");
	if (item->util) {
		/* An entry already existed */
		strbuf_addf(err,
			    "multiple updates for 'HEAD' (including one "
			    "via its referent '%s') are not allowed",
			    update->refname);
		return TRANSACTION_NAME_CONFLICT;
	}

	new_update = ref_transaction_add_update(
			transaction, "HEAD",
			update->flags | REF_LOG_ONLY | REF_NODEREF,
			update->new_oid.hash, update->old_oid.hash,
			update->msg);

	item->util = new_update;

	return 0;
}
2598 * update is for a symref that points at referent and doesn't have
2599 * REF_NODEREF set. Split it into two updates:
2600 * - The original update, but with REF_LOG_ONLY and REF_NODEREF set
2601 * - A new, separate update for the referent reference
2602 * Note that the new update will itself be subject to splitting when
2603 * the iteration gets to it.
2605 static int split_symref_update(struct files_ref_store
*refs
,
2606 struct ref_update
*update
,
2607 const char *referent
,
2608 struct ref_transaction
*transaction
,
2609 struct string_list
*affected_refnames
,
2612 struct string_list_item
*item
;
2613 struct ref_update
*new_update
;
2614 unsigned int new_flags
;
2617 * First make sure that referent is not already in the
2618 * transaction. This insertion is O(N) in the transaction
2619 * size, but it happens at most once per symref in a
2622 item
= string_list_insert(affected_refnames
, referent
);
2624 /* An entry already existed */
2626 "multiple updates for '%s' (including one "
2627 "via symref '%s') are not allowed",
2628 referent
, update
->refname
);
2629 return TRANSACTION_NAME_CONFLICT
;
2632 new_flags
= update
->flags
;
2633 if (!strcmp(update
->refname
, "HEAD")) {
2635 * Record that the new update came via HEAD, so that
2636 * when we process it, split_head_update() doesn't try
2637 * to add another reflog update for HEAD. Note that
2638 * this bit will be propagated if the new_update
2639 * itself needs to be split.
2641 new_flags
|= REF_UPDATE_VIA_HEAD
;
2644 new_update
= ref_transaction_add_update(
2645 transaction
, referent
, new_flags
,
2646 update
->new_oid
.hash
, update
->old_oid
.hash
,
2649 new_update
->parent_update
= update
;
2652 * Change the symbolic ref update to log only. Also, it
2653 * doesn't need to check its old SHA-1 value, as that will be
2654 * done when new_update is processed.
2656 update
->flags
|= REF_LOG_ONLY
| REF_NODEREF
;
2657 update
->flags
&= ~REF_HAVE_OLD
;
2659 item
->util
= new_update
;
2665 * Return the refname under which update was originally requested.
2667 static const char *original_update_refname(struct ref_update
*update
)
2669 while (update
->parent_update
)
2670 update
= update
->parent_update
;
2672 return update
->refname
;
2676 * Check whether the REF_HAVE_OLD and old_oid values stored in update
2677 * are consistent with oid, which is the reference's current value. If
2678 * everything is OK, return 0; otherwise, write an error message to
2679 * err and return -1.
2681 static int check_old_oid(struct ref_update
*update
, struct object_id
*oid
,
2684 if (!(update
->flags
& REF_HAVE_OLD
) ||
2685 !oidcmp(oid
, &update
->old_oid
))
2688 if (is_null_oid(&update
->old_oid
))
2689 strbuf_addf(err
, "cannot lock ref '%s': "
2690 "reference already exists",
2691 original_update_refname(update
));
2692 else if (is_null_oid(oid
))
2693 strbuf_addf(err
, "cannot lock ref '%s': "
2694 "reference is missing but expected %s",
2695 original_update_refname(update
),
2696 oid_to_hex(&update
->old_oid
));
2698 strbuf_addf(err
, "cannot lock ref '%s': "
2699 "is at %s but expected %s",
2700 original_update_refname(update
),
2702 oid_to_hex(&update
->old_oid
));
2708 * Prepare for carrying out update:
2709 * - Lock the reference referred to by update.
2710 * - Read the reference under lock.
2711 * - Check that its old SHA-1 value (if specified) is correct, and in
2712 * any case record it in update->lock->old_oid for later use when
2713 * writing the reflog.
2714 * - If it is a symref update without REF_NODEREF, split it up into a
2715 * REF_LOG_ONLY update of the symref and add a separate update for
2716 * the referent to transaction.
2717 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2720 static int lock_ref_for_update(struct files_ref_store
*refs
,
2721 struct ref_update
*update
,
2722 struct ref_transaction
*transaction
,
2723 const char *head_ref
,
2724 struct string_list
*affected_refnames
,
2727 struct strbuf referent
= STRBUF_INIT
;
2728 int mustexist
= (update
->flags
& REF_HAVE_OLD
) &&
2729 !is_null_oid(&update
->old_oid
);
2731 struct ref_lock
*lock
;
2733 files_assert_main_repository(refs
, "lock_ref_for_update");
2735 if ((update
->flags
& REF_HAVE_NEW
) && is_null_oid(&update
->new_oid
))
2736 update
->flags
|= REF_DELETING
;
2739 ret
= split_head_update(update
, transaction
, head_ref
,
2740 affected_refnames
, err
);
2745 ret
= lock_raw_ref(refs
, update
->refname
, mustexist
,
2746 affected_refnames
, NULL
,
2748 &update
->type
, err
);
2752 reason
= strbuf_detach(err
, NULL
);
2753 strbuf_addf(err
, "cannot lock ref '%s': %s",
2754 original_update_refname(update
), reason
);
2759 update
->backend_data
= lock
;
2761 if (update
->type
& REF_ISSYMREF
) {
2762 if (update
->flags
& REF_NODEREF
) {
2764 * We won't be reading the referent as part of
2765 * the transaction, so we have to read it here
2766 * to record and possibly check old_sha1:
2768 if (refs_read_ref_full(&refs
->base
,
2770 lock
->old_oid
.hash
, NULL
)) {
2771 if (update
->flags
& REF_HAVE_OLD
) {
2772 strbuf_addf(err
, "cannot lock ref '%s': "
2773 "error reading reference",
2774 original_update_refname(update
));
2777 } else if (check_old_oid(update
, &lock
->old_oid
, err
)) {
2778 return TRANSACTION_GENERIC_ERROR
;
2782 * Create a new update for the reference this
2783 * symref is pointing at. Also, we will record
2784 * and verify old_sha1 for this update as part
2785 * of processing the split-off update, so we
2786 * don't have to do it here.
2788 ret
= split_symref_update(refs
, update
,
2789 referent
.buf
, transaction
,
2790 affected_refnames
, err
);
2795 struct ref_update
*parent_update
;
2797 if (check_old_oid(update
, &lock
->old_oid
, err
))
2798 return TRANSACTION_GENERIC_ERROR
;
2801 * If this update is happening indirectly because of a
2802 * symref update, record the old SHA-1 in the parent
2805 for (parent_update
= update
->parent_update
;
2807 parent_update
= parent_update
->parent_update
) {
2808 struct ref_lock
*parent_lock
= parent_update
->backend_data
;
2809 oidcpy(&parent_lock
->old_oid
, &lock
->old_oid
);
2813 if ((update
->flags
& REF_HAVE_NEW
) &&
2814 !(update
->flags
& REF_DELETING
) &&
2815 !(update
->flags
& REF_LOG_ONLY
)) {
2816 if (!(update
->type
& REF_ISSYMREF
) &&
2817 !oidcmp(&lock
->old_oid
, &update
->new_oid
)) {
2819 * The reference already has the desired
2820 * value, so we don't need to write it.
2822 } else if (write_ref_to_lockfile(lock
, &update
->new_oid
,
2824 char *write_err
= strbuf_detach(err
, NULL
);
2827 * The lock was freed upon failure of
2828 * write_ref_to_lockfile():
2830 update
->backend_data
= NULL
;
2832 "cannot update ref '%s': %s",
2833 update
->refname
, write_err
);
2835 return TRANSACTION_GENERIC_ERROR
;
2837 update
->flags
|= REF_NEEDS_COMMIT
;
2840 if (!(update
->flags
& REF_NEEDS_COMMIT
)) {
2842 * We didn't call write_ref_to_lockfile(), so
2843 * the lockfile is still open. Close it to
2844 * free up the file descriptor:
2846 if (close_ref(lock
)) {
2847 strbuf_addf(err
, "couldn't close '%s.lock'",
2849 return TRANSACTION_GENERIC_ERROR
;
2856 * Unlock any references in `transaction` that are still locked, and
2857 * mark the transaction closed.
2859 static void files_transaction_cleanup(struct ref_transaction
*transaction
)
2863 for (i
= 0; i
< transaction
->nr
; i
++) {
2864 struct ref_update
*update
= transaction
->updates
[i
];
2865 struct ref_lock
*lock
= update
->backend_data
;
2869 update
->backend_data
= NULL
;
2873 transaction
->state
= REF_TRANSACTION_CLOSED
;
2876 static int files_transaction_prepare(struct ref_store
*ref_store
,
2877 struct ref_transaction
*transaction
,
2880 struct files_ref_store
*refs
=
2881 files_downcast(ref_store
, REF_STORE_WRITE
,
2882 "ref_transaction_prepare");
2885 struct string_list affected_refnames
= STRING_LIST_INIT_NODUP
;
2886 char *head_ref
= NULL
;
2888 struct object_id head_oid
;
2892 if (!transaction
->nr
)
2896 * Fail if a refname appears more than once in the
2897 * transaction. (If we end up splitting up any updates using
2898 * split_symref_update() or split_head_update(), those
2899 * functions will check that the new updates don't have the
2900 * same refname as any existing ones.)
2902 for (i
= 0; i
< transaction
->nr
; i
++) {
2903 struct ref_update
*update
= transaction
->updates
[i
];
2904 struct string_list_item
*item
=
2905 string_list_append(&affected_refnames
, update
->refname
);
2908 * We store a pointer to update in item->util, but at
2909 * the moment we never use the value of this field
2910 * except to check whether it is non-NULL.
2912 item
->util
= update
;
2914 string_list_sort(&affected_refnames
);
2915 if (ref_update_reject_duplicates(&affected_refnames
, err
)) {
2916 ret
= TRANSACTION_GENERIC_ERROR
;
2921 * Special hack: If a branch is updated directly and HEAD
2922 * points to it (may happen on the remote side of a push
2923 * for example) then logically the HEAD reflog should be
2926 * A generic solution would require reverse symref lookups,
2927 * but finding all symrefs pointing to a given branch would be
2928 * rather costly for this rare event (the direct update of a
2929 * branch) to be worth it. So let's cheat and check with HEAD
2930 * only, which should cover 99% of all usage scenarios (even
2931 * 100% of the default ones).
2933 * So if HEAD is a symbolic reference, then record the name of
2934 * the reference that it points to. If we see an update of
2935 * head_ref within the transaction, then split_head_update()
2936 * arranges for the reflog of HEAD to be updated, too.
2938 head_ref
= refs_resolve_refdup(ref_store
, "HEAD",
2939 RESOLVE_REF_NO_RECURSE
,
2940 head_oid
.hash
, &head_type
);
2942 if (head_ref
&& !(head_type
& REF_ISSYMREF
)) {
2948 * Acquire all locks, verify old values if provided, check
2949 * that new values are valid, and write new values to the
2950 * lockfiles, ready to be activated. Only keep one lockfile
2951 * open at a time to avoid running out of file descriptors.
2952 * Note that lock_ref_for_update() might append more updates
2953 * to the transaction.
2955 for (i
= 0; i
< transaction
->nr
; i
++) {
2956 struct ref_update
*update
= transaction
->updates
[i
];
2958 ret
= lock_ref_for_update(refs
, update
, transaction
,
2959 head_ref
, &affected_refnames
, err
);
2966 string_list_clear(&affected_refnames
, 0);
2969 files_transaction_cleanup(transaction
);
2971 transaction
->state
= REF_TRANSACTION_PREPARED
;
2976 static int files_transaction_finish(struct ref_store
*ref_store
,
2977 struct ref_transaction
*transaction
,
2980 struct files_ref_store
*refs
=
2981 files_downcast(ref_store
, 0, "ref_transaction_finish");
2984 struct string_list refs_to_delete
= STRING_LIST_INIT_NODUP
;
2985 struct string_list_item
*ref_to_delete
;
2986 struct strbuf sb
= STRBUF_INIT
;
2990 if (!transaction
->nr
) {
2991 transaction
->state
= REF_TRANSACTION_CLOSED
;
2995 /* Perform updates first so live commits remain referenced */
2996 for (i
= 0; i
< transaction
->nr
; i
++) {
2997 struct ref_update
*update
= transaction
->updates
[i
];
2998 struct ref_lock
*lock
= update
->backend_data
;
3000 if (update
->flags
& REF_NEEDS_COMMIT
||
3001 update
->flags
& REF_LOG_ONLY
) {
3002 if (files_log_ref_write(refs
,
3006 update
->msg
, update
->flags
,
3008 char *old_msg
= strbuf_detach(err
, NULL
);
3010 strbuf_addf(err
, "cannot update the ref '%s': %s",
3011 lock
->ref_name
, old_msg
);
3014 update
->backend_data
= NULL
;
3015 ret
= TRANSACTION_GENERIC_ERROR
;
3019 if (update
->flags
& REF_NEEDS_COMMIT
) {
3020 clear_loose_ref_cache(refs
);
3021 if (commit_ref(lock
)) {
3022 strbuf_addf(err
, "couldn't set '%s'", lock
->ref_name
);
3024 update
->backend_data
= NULL
;
3025 ret
= TRANSACTION_GENERIC_ERROR
;
3030 /* Perform deletes now that updates are safely completed */
3031 for (i
= 0; i
< transaction
->nr
; i
++) {
3032 struct ref_update
*update
= transaction
->updates
[i
];
3033 struct ref_lock
*lock
= update
->backend_data
;
3035 if (update
->flags
& REF_DELETING
&&
3036 !(update
->flags
& REF_LOG_ONLY
)) {
3037 if (!(update
->type
& REF_ISPACKED
) ||
3038 update
->type
& REF_ISSYMREF
) {
3039 /* It is a loose reference. */
3041 files_ref_path(refs
, &sb
, lock
->ref_name
);
3042 if (unlink_or_msg(sb
.buf
, err
)) {
3043 ret
= TRANSACTION_GENERIC_ERROR
;
3046 update
->flags
|= REF_DELETED_LOOSE
;
3049 if (!(update
->flags
& REF_ISPRUNING
))
3050 string_list_append(&refs_to_delete
,
3055 if (repack_without_refs(refs
, &refs_to_delete
, err
)) {
3056 ret
= TRANSACTION_GENERIC_ERROR
;
3060 /* Delete the reflogs of any references that were deleted: */
3061 for_each_string_list_item(ref_to_delete
, &refs_to_delete
) {
3063 files_reflog_path(refs
, &sb
, ref_to_delete
->string
);
3064 if (!unlink_or_warn(sb
.buf
))
3065 try_remove_empty_parents(refs
, ref_to_delete
->string
,
3066 REMOVE_EMPTY_PARENTS_REFLOG
);
3069 clear_loose_ref_cache(refs
);
3072 files_transaction_cleanup(transaction
);
3074 for (i
= 0; i
< transaction
->nr
; i
++) {
3075 struct ref_update
*update
= transaction
->updates
[i
];
3077 if (update
->flags
& REF_DELETED_LOOSE
) {
3079 * The loose reference was deleted. Delete any
3080 * empty parent directories. (Note that this
3081 * can only work because we have already
3082 * removed the lockfile.)
3084 try_remove_empty_parents(refs
, update
->refname
,
3085 REMOVE_EMPTY_PARENTS_REF
);
3089 strbuf_release(&sb
);
3090 string_list_clear(&refs_to_delete
, 0);
/* Abort the transaction: release any held locks and close it. */
static int files_transaction_abort(struct ref_store *ref_store,
				   struct ref_transaction *transaction,
				   struct strbuf *err)
{
	files_transaction_cleanup(transaction);
	return 0;
}
/*
 * each_ref_fn callback: report whether refname is listed in the
 * string_list passed via cb_data.
 */
static int ref_present(const char *refname,
		       const struct object_id *oid, int flags, void *cb_data)
{
	struct string_list *affected_refnames = cb_data;

	return string_list_has_string(affected_refnames, refname);
}
3110 static int files_initial_transaction_commit(struct ref_store
*ref_store
,
3111 struct ref_transaction
*transaction
,
3114 struct files_ref_store
*refs
=
3115 files_downcast(ref_store
, REF_STORE_WRITE
,
3116 "initial_ref_transaction_commit");
3119 struct string_list affected_refnames
= STRING_LIST_INIT_NODUP
;
3123 if (transaction
->state
!= REF_TRANSACTION_OPEN
)
3124 die("BUG: commit called for transaction that is not open");
3126 /* Fail if a refname appears more than once in the transaction: */
3127 for (i
= 0; i
< transaction
->nr
; i
++)
3128 string_list_append(&affected_refnames
,
3129 transaction
->updates
[i
]->refname
);
3130 string_list_sort(&affected_refnames
);
3131 if (ref_update_reject_duplicates(&affected_refnames
, err
)) {
3132 ret
= TRANSACTION_GENERIC_ERROR
;
3137 * It's really undefined to call this function in an active
3138 * repository or when there are existing references: we are
3139 * only locking and changing packed-refs, so (1) any
3140 * simultaneous processes might try to change a reference at
3141 * the same time we do, and (2) any existing loose versions of
3142 * the references that we are setting would have precedence
3143 * over our values. But some remote helpers create the remote
3144 * "HEAD" and "master" branches before calling this function,
3145 * so here we really only check that none of the references
3146 * that we are creating already exists.
3148 if (refs_for_each_rawref(&refs
->base
, ref_present
,
3149 &affected_refnames
))
3150 die("BUG: initial ref transaction called with existing refs");
3152 for (i
= 0; i
< transaction
->nr
; i
++) {
3153 struct ref_update
*update
= transaction
->updates
[i
];
3155 if ((update
->flags
& REF_HAVE_OLD
) &&
3156 !is_null_oid(&update
->old_oid
))
3157 die("BUG: initial ref transaction with old_sha1 set");
3158 if (refs_verify_refname_available(&refs
->base
, update
->refname
,
3159 &affected_refnames
, NULL
,
3161 ret
= TRANSACTION_NAME_CONFLICT
;
3166 if (lock_packed_refs(refs
, 0)) {
3167 strbuf_addf(err
, "unable to lock packed-refs file: %s",
3169 ret
= TRANSACTION_GENERIC_ERROR
;
3173 for (i
= 0; i
< transaction
->nr
; i
++) {
3174 struct ref_update
*update
= transaction
->updates
[i
];
3176 if ((update
->flags
& REF_HAVE_NEW
) &&
3177 !is_null_oid(&update
->new_oid
))
3178 add_packed_ref(refs
, update
->refname
,
3182 if (commit_packed_refs(refs
)) {
3183 strbuf_addf(err
, "unable to commit packed-refs file: %s",
3185 ret
= TRANSACTION_GENERIC_ERROR
;
3190 transaction
->state
= REF_TRANSACTION_CLOSED
;
3191 string_list_clear(&affected_refnames
, 0);
3195 struct expire_reflog_cb
{
3197 reflog_expiry_should_prune_fn
*should_prune_fn
;
3200 struct object_id last_kept_oid
;
3203 static int expire_reflog_ent(struct object_id
*ooid
, struct object_id
*noid
,
3204 const char *email
, timestamp_t timestamp
, int tz
,
3205 const char *message
, void *cb_data
)
3207 struct expire_reflog_cb
*cb
= cb_data
;
3208 struct expire_reflog_policy_cb
*policy_cb
= cb
->policy_cb
;
3210 if (cb
->flags
& EXPIRE_REFLOGS_REWRITE
)
3211 ooid
= &cb
->last_kept_oid
;
3213 if ((*cb
->should_prune_fn
)(ooid
, noid
, email
, timestamp
, tz
,
3214 message
, policy_cb
)) {
3216 printf("would prune %s", message
);
3217 else if (cb
->flags
& EXPIRE_REFLOGS_VERBOSE
)
3218 printf("prune %s", message
);
3221 fprintf(cb
->newlog
, "%s %s %s %"PRItime
" %+05d\t%s",
3222 oid_to_hex(ooid
), oid_to_hex(noid
),
3223 email
, timestamp
, tz
, message
);
3224 oidcpy(&cb
->last_kept_oid
, noid
);
3226 if (cb
->flags
& EXPIRE_REFLOGS_VERBOSE
)
3227 printf("keep %s", message
);
3232 static int files_reflog_expire(struct ref_store
*ref_store
,
3233 const char *refname
, const unsigned char *sha1
,
3235 reflog_expiry_prepare_fn prepare_fn
,
3236 reflog_expiry_should_prune_fn should_prune_fn
,
3237 reflog_expiry_cleanup_fn cleanup_fn
,
3238 void *policy_cb_data
)
3240 struct files_ref_store
*refs
=
3241 files_downcast(ref_store
, REF_STORE_WRITE
, "reflog_expire");
3242 static struct lock_file reflog_lock
;
3243 struct expire_reflog_cb cb
;
3244 struct ref_lock
*lock
;
3245 struct strbuf log_file_sb
= STRBUF_INIT
;
3249 struct strbuf err
= STRBUF_INIT
;
3250 struct object_id oid
;
3252 memset(&cb
, 0, sizeof(cb
));
3254 cb
.policy_cb
= policy_cb_data
;
3255 cb
.should_prune_fn
= should_prune_fn
;
3258 * The reflog file is locked by holding the lock on the
3259 * reference itself, plus we might need to update the
3260 * reference if --updateref was specified:
3262 lock
= lock_ref_sha1_basic(refs
, refname
, sha1
,
3263 NULL
, NULL
, REF_NODEREF
,
3266 error("cannot lock ref '%s': %s", refname
, err
.buf
);
3267 strbuf_release(&err
);
3270 if (!refs_reflog_exists(ref_store
, refname
)) {
3275 files_reflog_path(refs
, &log_file_sb
, refname
);
3276 log_file
= strbuf_detach(&log_file_sb
, NULL
);
3277 if (!(flags
& EXPIRE_REFLOGS_DRY_RUN
)) {
3279 * Even though holding $GIT_DIR/logs/$reflog.lock has
3280 * no locking implications, we use the lock_file
3281 * machinery here anyway because it does a lot of the
3282 * work we need, including cleaning up if the program
3283 * exits unexpectedly.
3285 if (hold_lock_file_for_update(&reflog_lock
, log_file
, 0) < 0) {
3286 struct strbuf err
= STRBUF_INIT
;
3287 unable_to_lock_message(log_file
, errno
, &err
);
3288 error("%s", err
.buf
);
3289 strbuf_release(&err
);
3292 cb
.newlog
= fdopen_lock_file(&reflog_lock
, "w");
3294 error("cannot fdopen %s (%s)",
3295 get_lock_file_path(&reflog_lock
), strerror(errno
));
3300 hashcpy(oid
.hash
, sha1
);
3302 (*prepare_fn
)(refname
, &oid
, cb
.policy_cb
);
3303 refs_for_each_reflog_ent(ref_store
, refname
, expire_reflog_ent
, &cb
);
3304 (*cleanup_fn
)(cb
.policy_cb
);
3306 if (!(flags
& EXPIRE_REFLOGS_DRY_RUN
)) {
3308 * It doesn't make sense to adjust a reference pointed
3309 * to by a symbolic ref based on expiring entries in
3310 * the symbolic reference's reflog. Nor can we update
3311 * a reference if there are no remaining reflog
3314 int update
= (flags
& EXPIRE_REFLOGS_UPDATE_REF
) &&
3315 !(type
& REF_ISSYMREF
) &&
3316 !is_null_oid(&cb
.last_kept_oid
);
3318 if (close_lock_file(&reflog_lock
)) {
3319 status
|= error("couldn't write %s: %s", log_file
,
3321 } else if (update
&&
3322 (write_in_full(get_lock_file_fd(lock
->lk
),
3323 oid_to_hex(&cb
.last_kept_oid
), GIT_SHA1_HEXSZ
) != GIT_SHA1_HEXSZ
||
3324 write_str_in_full(get_lock_file_fd(lock
->lk
), "\n") != 1 ||
3325 close_ref(lock
) < 0)) {
3326 status
|= error("couldn't write %s",
3327 get_lock_file_path(lock
->lk
));
3328 rollback_lock_file(&reflog_lock
);
3329 } else if (commit_lock_file(&reflog_lock
)) {
3330 status
|= error("unable to write reflog '%s' (%s)",
3331 log_file
, strerror(errno
));
3332 } else if (update
&& commit_ref(lock
)) {
3333 status
|= error("couldn't set %s", lock
->ref_name
);
3341 rollback_lock_file(&reflog_lock
);
3347 static int files_init_db(struct ref_store
*ref_store
, struct strbuf
*err
)
3349 struct files_ref_store
*refs
=
3350 files_downcast(ref_store
, REF_STORE_WRITE
, "init_db");
3351 struct strbuf sb
= STRBUF_INIT
;
3354 * Create .git/refs/{heads,tags}
3356 files_ref_path(refs
, &sb
, "refs/heads");
3357 safe_create_dir(sb
.buf
, 1);
3360 files_ref_path(refs
, &sb
, "refs/tags");
3361 safe_create_dir(sb
.buf
, 1);
3363 strbuf_release(&sb
);
3367 struct ref_storage_be refs_be_files
= {
3370 files_ref_store_create
,
3372 files_transaction_prepare
,
3373 files_transaction_finish
,
3374 files_transaction_abort
,
3375 files_initial_transaction_commit
,
3379 files_create_symref
,
3383 files_ref_iterator_begin
,
3386 files_reflog_iterator_begin
,
3387 files_for_each_reflog_ent
,
3388 files_for_each_reflog_ent_reverse
,
3389 files_reflog_exists
,
3390 files_create_reflog
,
3391 files_delete_reflog
,