3 #include "refs-internal.h"
5 #include "../iterator.h"
6 #include "../dir-iterator.h"
7 #include "../lockfile.h"
14 struct object_id old_oid
;
18 * Return true if refname, which has the specified oid and flags, can
19 * be resolved to an object in the database. If the referred-to object
20 * does not exist, emit a warning and return false.
22 static int ref_resolves_to_object(const char *refname
,
23 const struct object_id
*oid
,
26 if (flags
& REF_ISBROKEN
)
28 if (!has_sha1_file(oid
->hash
)) {
29 error("%s does not point to a valid object!", refname
);
35 struct packed_ref_cache
{
36 struct ref_cache
*cache
;
39 * Count of references to the data structure in this instance,
40 * including the pointer from files_ref_store::packed if any.
41 * The data will not be freed as long as the reference count
44 unsigned int referrers
;
46 /* The metadata from when this packed-refs cache was read */
47 struct stat_validity validity
;
51 * A container for `packed-refs`-related data. It is not (yet) a
54 struct packed_ref_store
{
55 unsigned int store_flags
;
57 /* The path of the "packed-refs" file: */
61 * A cache of the values read from the `packed-refs` file, if
62 * it might still be current; otherwise, NULL.
64 struct packed_ref_cache
*cache
;
67 * Lock used for the "packed-refs" file. Note that this (and
68 * thus the enclosing `packed_ref_store`) must not be freed.
70 struct lock_file lock
;
73 static struct packed_ref_store
*packed_ref_store_create(
74 const char *path
, unsigned int store_flags
)
76 struct packed_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
78 refs
->store_flags
= store_flags
;
79 refs
->path
= xstrdup(path
);
84 * Future: need to be in "struct repository"
85 * when doing a full libification.
87 struct files_ref_store
{
88 struct ref_store base
;
89 unsigned int store_flags
;
94 struct ref_cache
*loose
;
96 struct packed_ref_store
*packed_ref_store
;
100 * Increment the reference count of *packed_refs.
102 static void acquire_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
104 packed_refs
->referrers
++;
108 * Decrease the reference count of *packed_refs. If it goes to zero,
109 * free *packed_refs and return true; otherwise return false.
111 static int release_packed_ref_cache(struct packed_ref_cache
*packed_refs
)
113 if (!--packed_refs
->referrers
) {
114 free_ref_cache(packed_refs
->cache
);
115 stat_validity_clear(&packed_refs
->validity
);
123 static void clear_packed_ref_cache(struct packed_ref_store
*refs
)
126 struct packed_ref_cache
*cache
= refs
->cache
;
128 if (is_lock_file_locked(&refs
->lock
))
129 die("BUG: packed-ref cache cleared while locked");
131 release_packed_ref_cache(cache
);
135 static void clear_loose_ref_cache(struct files_ref_store
*refs
)
138 free_ref_cache(refs
->loose
);
144 * Create a new submodule ref cache and add it to the internal
147 static struct ref_store
*files_ref_store_create(const char *gitdir
,
150 struct files_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
151 struct ref_store
*ref_store
= (struct ref_store
*)refs
;
152 struct strbuf sb
= STRBUF_INIT
;
154 base_ref_store_init(ref_store
, &refs_be_files
);
155 refs
->store_flags
= flags
;
157 refs
->gitdir
= xstrdup(gitdir
);
158 get_common_dir_noenv(&sb
, gitdir
);
159 refs
->gitcommondir
= strbuf_detach(&sb
, NULL
);
160 strbuf_addf(&sb
, "%s/packed-refs", refs
->gitcommondir
);
161 refs
->packed_ref_store
= packed_ref_store_create(sb
.buf
, flags
);
168 * Die if refs is not the main ref store. caller is used in any
169 * necessary error messages.
171 static void files_assert_main_repository(struct files_ref_store
*refs
,
174 if (refs
->store_flags
& REF_STORE_MAIN
)
177 die("BUG: operation %s only allowed for main ref store", caller
);
181 * Downcast ref_store to files_ref_store. Die if ref_store is not a
182 * files_ref_store. required_flags is compared with ref_store's
183 * store_flags to ensure the ref_store has all required capabilities.
184 * "caller" is used in any necessary error messages.
186 static struct files_ref_store
*files_downcast(struct ref_store
*ref_store
,
187 unsigned int required_flags
,
190 struct files_ref_store
*refs
;
192 if (ref_store
->be
!= &refs_be_files
)
193 die("BUG: ref_store is type \"%s\" not \"files\" in %s",
194 ref_store
->be
->name
, caller
);
196 refs
= (struct files_ref_store
*)ref_store
;
198 if ((refs
->store_flags
& required_flags
) != required_flags
)
199 die("BUG: operation %s requires abilities 0x%x, but only have 0x%x",
200 caller
, required_flags
, refs
->store_flags
);
/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later. The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";
216 * Parse one line from a packed-refs file. Write the SHA1 to sha1.
217 * Return a pointer to the refname within the line (null-terminated),
218 * or NULL if there was a problem.
220 static const char *parse_ref_line(struct strbuf
*line
, struct object_id
*oid
)
224 if (parse_oid_hex(line
->buf
, oid
, &ref
) < 0)
226 if (!isspace(*ref
++))
232 if (line
->buf
[line
->len
- 1] != '\n')
234 line
->buf
[--line
->len
] = 0;
240 * Read from `packed_refs_file` into a newly-allocated
241 * `packed_ref_cache` and return it. The return value will already
242 * have its reference count incremented.
244 * A comment line of the form "# pack-refs with: " may contain zero or
245 * more traits. We interpret the traits as follows:
249 * Probably no references are peeled. But if the file contains a
250 * peeled value for a reference, we will use it.
254 * References under "refs/tags/", if they *can* be peeled, *are*
255 * peeled in this file. References outside of "refs/tags/" are
256 * probably not peeled even if they could have been, but if we find
257 * a peeled value for such a reference we will use it.
261 * All references in the file that can be peeled are peeled.
262 * Inversely (and this is more important), any references in the
263 * file for which no peeled value is recorded is not peelable. This
264 * trait should typically be written alongside "peeled" for
265 * compatibility with older clients, but we do not require it
266 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
268 static struct packed_ref_cache
*read_packed_refs(const char *packed_refs_file
)
271 struct packed_ref_cache
*packed_refs
= xcalloc(1, sizeof(*packed_refs
));
272 struct ref_entry
*last
= NULL
;
273 struct strbuf line
= STRBUF_INIT
;
274 enum { PEELED_NONE
, PEELED_TAGS
, PEELED_FULLY
} peeled
= PEELED_NONE
;
277 acquire_packed_ref_cache(packed_refs
);
278 packed_refs
->cache
= create_ref_cache(NULL
, NULL
);
279 packed_refs
->cache
->root
->flag
&= ~REF_INCOMPLETE
;
281 f
= fopen(packed_refs_file
, "r");
283 if (errno
== ENOENT
) {
285 * This is OK; it just means that no
286 * "packed-refs" file has been written yet,
287 * which is equivalent to it being empty.
291 die_errno("couldn't read %s", packed_refs_file
);
295 stat_validity_update(&packed_refs
->validity
, fileno(f
));
297 dir
= get_ref_dir(packed_refs
->cache
->root
);
298 while (strbuf_getwholeline(&line
, f
, '\n') != EOF
) {
299 struct object_id oid
;
303 if (skip_prefix(line
.buf
, "# pack-refs with:", &traits
)) {
304 if (strstr(traits
, " fully-peeled "))
305 peeled
= PEELED_FULLY
;
306 else if (strstr(traits
, " peeled "))
307 peeled
= PEELED_TAGS
;
308 /* perhaps other traits later as well */
312 refname
= parse_ref_line(&line
, &oid
);
314 int flag
= REF_ISPACKED
;
316 if (check_refname_format(refname
, REFNAME_ALLOW_ONELEVEL
)) {
317 if (!refname_is_safe(refname
))
318 die("packed refname is dangerous: %s", refname
);
320 flag
|= REF_BAD_NAME
| REF_ISBROKEN
;
322 last
= create_ref_entry(refname
, &oid
, flag
);
323 if (peeled
== PEELED_FULLY
||
324 (peeled
== PEELED_TAGS
&& starts_with(refname
, "refs/tags/")))
325 last
->flag
|= REF_KNOWS_PEELED
;
326 add_ref_entry(dir
, last
);
330 line
.buf
[0] == '^' &&
331 line
.len
== PEELED_LINE_LENGTH
&&
332 line
.buf
[PEELED_LINE_LENGTH
- 1] == '\n' &&
333 !get_oid_hex(line
.buf
+ 1, &oid
)) {
334 oidcpy(&last
->u
.value
.peeled
, &oid
);
336 * Regardless of what the file header said,
337 * we definitely know the value of *this*
340 last
->flag
|= REF_KNOWS_PEELED
;
345 strbuf_release(&line
);
350 static void files_reflog_path(struct files_ref_store
*refs
,
356 * FIXME: of course this is wrong in multi worktree
357 * setting. To be fixed real soon.
359 strbuf_addf(sb
, "%s/logs", refs
->gitcommondir
);
363 switch (ref_type(refname
)) {
364 case REF_TYPE_PER_WORKTREE
:
365 case REF_TYPE_PSEUDOREF
:
366 strbuf_addf(sb
, "%s/logs/%s", refs
->gitdir
, refname
);
368 case REF_TYPE_NORMAL
:
369 strbuf_addf(sb
, "%s/logs/%s", refs
->gitcommondir
, refname
);
372 die("BUG: unknown ref type %d of ref %s",
373 ref_type(refname
), refname
);
377 static void files_ref_path(struct files_ref_store
*refs
,
381 switch (ref_type(refname
)) {
382 case REF_TYPE_PER_WORKTREE
:
383 case REF_TYPE_PSEUDOREF
:
384 strbuf_addf(sb
, "%s/%s", refs
->gitdir
, refname
);
386 case REF_TYPE_NORMAL
:
387 strbuf_addf(sb
, "%s/%s", refs
->gitcommondir
, refname
);
390 die("BUG: unknown ref type %d of ref %s",
391 ref_type(refname
), refname
);
396 * Check that the packed refs cache (if any) still reflects the
397 * contents of the file. If not, clear the cache.
399 static void validate_packed_ref_cache(struct files_ref_store
*refs
)
401 if (refs
->packed_ref_store
->cache
&&
402 !stat_validity_check(&refs
->packed_ref_store
->cache
->validity
,
403 refs
->packed_ref_store
->path
))
404 clear_packed_ref_cache(refs
->packed_ref_store
);
408 * Get the packed_ref_cache for the specified files_ref_store,
409 * creating and populating it if it hasn't been read before or if the
410 * file has been changed (according to its `validity` field) since it
411 * was last read. On the other hand, if we hold the lock, then assume
412 * that the file hasn't been changed out from under us, so skip the
413 * extra `stat()` call in `stat_validity_check()`.
415 static struct packed_ref_cache
*get_packed_ref_cache(struct files_ref_store
*refs
)
417 const char *packed_refs_file
= refs
->packed_ref_store
->path
;
419 if (!is_lock_file_locked(&refs
->packed_ref_store
->lock
))
420 validate_packed_ref_cache(refs
);
422 if (!refs
->packed_ref_store
->cache
)
423 refs
->packed_ref_store
->cache
= read_packed_refs(packed_refs_file
);
425 return refs
->packed_ref_store
->cache
;
428 static struct ref_dir
*get_packed_ref_dir(struct packed_ref_cache
*packed_ref_cache
)
430 return get_ref_dir(packed_ref_cache
->cache
->root
);
/* Convenience wrapper: the (possibly freshly read) packed-ref dir for refs. */
static struct ref_dir *get_packed_refs(struct files_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}
439 * Add or overwrite a reference in the in-memory packed reference
440 * cache. This may only be called while the packed-refs file is locked
441 * (see lock_packed_refs()). To actually write the packed-refs file,
442 * call commit_packed_refs().
444 static void add_packed_ref(struct files_ref_store
*refs
,
445 const char *refname
, const struct object_id
*oid
)
447 struct ref_dir
*packed_refs
;
448 struct ref_entry
*packed_entry
;
450 if (!is_lock_file_locked(&refs
->packed_ref_store
->lock
))
451 die("BUG: packed refs not locked");
453 if (check_refname_format(refname
, REFNAME_ALLOW_ONELEVEL
))
454 die("Reference has invalid format: '%s'", refname
);
456 packed_refs
= get_packed_refs(refs
);
457 packed_entry
= find_ref_entry(packed_refs
, refname
);
459 /* Overwrite the existing entry: */
460 oidcpy(&packed_entry
->u
.value
.oid
, oid
);
461 packed_entry
->flag
= REF_ISPACKED
;
462 oidclr(&packed_entry
->u
.value
.peeled
);
464 packed_entry
= create_ref_entry(refname
, oid
, REF_ISPACKED
);
465 add_ref_entry(packed_refs
, packed_entry
);
470 * Read the loose references from the namespace dirname into dir
471 * (without recursing). dirname must end with '/'. dir must be the
472 * directory entry corresponding to dirname.
474 static void loose_fill_ref_dir(struct ref_store
*ref_store
,
475 struct ref_dir
*dir
, const char *dirname
)
477 struct files_ref_store
*refs
=
478 files_downcast(ref_store
, REF_STORE_READ
, "fill_ref_dir");
481 int dirnamelen
= strlen(dirname
);
482 struct strbuf refname
;
483 struct strbuf path
= STRBUF_INIT
;
486 files_ref_path(refs
, &path
, dirname
);
487 path_baselen
= path
.len
;
489 d
= opendir(path
.buf
);
491 strbuf_release(&path
);
495 strbuf_init(&refname
, dirnamelen
+ 257);
496 strbuf_add(&refname
, dirname
, dirnamelen
);
498 while ((de
= readdir(d
)) != NULL
) {
499 struct object_id oid
;
503 if (de
->d_name
[0] == '.')
505 if (ends_with(de
->d_name
, ".lock"))
507 strbuf_addstr(&refname
, de
->d_name
);
508 strbuf_addstr(&path
, de
->d_name
);
509 if (stat(path
.buf
, &st
) < 0) {
510 ; /* silently ignore */
511 } else if (S_ISDIR(st
.st_mode
)) {
512 strbuf_addch(&refname
, '/');
513 add_entry_to_dir(dir
,
514 create_dir_entry(dir
->cache
, refname
.buf
,
517 if (!refs_resolve_ref_unsafe(&refs
->base
,
522 flag
|= REF_ISBROKEN
;
523 } else if (is_null_oid(&oid
)) {
525 * It is so astronomically unlikely
526 * that NULL_SHA1 is the SHA-1 of an
527 * actual object that we consider its
528 * appearance in a loose reference
529 * file to be repo corruption
530 * (probably due to a software bug).
532 flag
|= REF_ISBROKEN
;
535 if (check_refname_format(refname
.buf
,
536 REFNAME_ALLOW_ONELEVEL
)) {
537 if (!refname_is_safe(refname
.buf
))
538 die("loose refname is dangerous: %s", refname
.buf
);
540 flag
|= REF_BAD_NAME
| REF_ISBROKEN
;
542 add_entry_to_dir(dir
,
543 create_ref_entry(refname
.buf
, &oid
, flag
));
545 strbuf_setlen(&refname
, dirnamelen
);
546 strbuf_setlen(&path
, path_baselen
);
548 strbuf_release(&refname
);
549 strbuf_release(&path
);
553 * Manually add refs/bisect, which, being per-worktree, might
554 * not appear in the directory listing for refs/ in the main
557 if (!strcmp(dirname
, "refs/")) {
558 int pos
= search_ref_dir(dir
, "refs/bisect/", 12);
561 struct ref_entry
*child_entry
= create_dir_entry(
562 dir
->cache
, "refs/bisect/", 12, 1);
563 add_entry_to_dir(dir
, child_entry
);
568 static struct ref_cache
*get_loose_ref_cache(struct files_ref_store
*refs
)
572 * Mark the top-level directory complete because we
573 * are about to read the only subdirectory that can
576 refs
->loose
= create_ref_cache(&refs
->base
, loose_fill_ref_dir
);
578 /* We're going to fill the top level ourselves: */
579 refs
->loose
->root
->flag
&= ~REF_INCOMPLETE
;
582 * Add an incomplete entry for "refs/" (to be filled
585 add_entry_to_dir(get_ref_dir(refs
->loose
->root
),
586 create_dir_entry(refs
->loose
, "refs/", 5, 1));
/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct files_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}
602 * A loose ref file doesn't exist; check for a packed ref.
604 static int resolve_packed_ref(struct files_ref_store
*refs
,
606 unsigned char *sha1
, unsigned int *flags
)
608 struct ref_entry
*entry
;
611 * The loose reference file does not exist; check for a packed
614 entry
= get_packed_ref(refs
, refname
);
616 hashcpy(sha1
, entry
->u
.value
.oid
.hash
);
617 *flags
|= REF_ISPACKED
;
620 /* refname is not a packed reference. */
624 static int files_read_raw_ref(struct ref_store
*ref_store
,
625 const char *refname
, unsigned char *sha1
,
626 struct strbuf
*referent
, unsigned int *type
)
628 struct files_ref_store
*refs
=
629 files_downcast(ref_store
, REF_STORE_READ
, "read_raw_ref");
630 struct strbuf sb_contents
= STRBUF_INIT
;
631 struct strbuf sb_path
= STRBUF_INIT
;
638 int remaining_retries
= 3;
641 strbuf_reset(&sb_path
);
643 files_ref_path(refs
, &sb_path
, refname
);
649 * We might have to loop back here to avoid a race
650 * condition: first we lstat() the file, then we try
651 * to read it as a link or as a file. But if somebody
652 * changes the type of the file (file <-> directory
653 * <-> symlink) between the lstat() and reading, then
654 * we don't want to report that as an error but rather
655 * try again starting with the lstat().
657 * We'll keep a count of the retries, though, just to avoid
658 * any confusing situation sending us into an infinite loop.
661 if (remaining_retries
-- <= 0)
664 if (lstat(path
, &st
) < 0) {
667 if (resolve_packed_ref(refs
, refname
, sha1
, type
)) {
675 /* Follow "normalized" - ie "refs/.." symlinks by hand */
676 if (S_ISLNK(st
.st_mode
)) {
677 strbuf_reset(&sb_contents
);
678 if (strbuf_readlink(&sb_contents
, path
, 0) < 0) {
679 if (errno
== ENOENT
|| errno
== EINVAL
)
680 /* inconsistent with lstat; retry */
685 if (starts_with(sb_contents
.buf
, "refs/") &&
686 !check_refname_format(sb_contents
.buf
, 0)) {
687 strbuf_swap(&sb_contents
, referent
);
688 *type
|= REF_ISSYMREF
;
693 * It doesn't look like a refname; fall through to just
694 * treating it like a non-symlink, and reading whatever it
699 /* Is it a directory? */
700 if (S_ISDIR(st
.st_mode
)) {
702 * Even though there is a directory where the loose
703 * ref is supposed to be, there could still be a
706 if (resolve_packed_ref(refs
, refname
, sha1
, type
)) {
715 * Anything else, just open it and try to use it as
718 fd
= open(path
, O_RDONLY
);
720 if (errno
== ENOENT
&& !S_ISLNK(st
.st_mode
))
721 /* inconsistent with lstat; retry */
726 strbuf_reset(&sb_contents
);
727 if (strbuf_read(&sb_contents
, fd
, 256) < 0) {
728 int save_errno
= errno
;
734 strbuf_rtrim(&sb_contents
);
735 buf
= sb_contents
.buf
;
736 if (starts_with(buf
, "ref:")) {
738 while (isspace(*buf
))
741 strbuf_reset(referent
);
742 strbuf_addstr(referent
, buf
);
743 *type
|= REF_ISSYMREF
;
749 * Please note that FETCH_HEAD has additional
750 * data after the sha.
752 if (get_sha1_hex(buf
, sha1
) ||
753 (buf
[40] != '\0' && !isspace(buf
[40]))) {
754 *type
|= REF_ISBROKEN
;
763 strbuf_release(&sb_path
);
764 strbuf_release(&sb_contents
);
769 static void unlock_ref(struct ref_lock
*lock
)
771 /* Do not free lock->lk -- atexit() still looks at them */
773 rollback_lock_file(lock
->lk
);
774 free(lock
->ref_name
);
779 * Lock refname, without following symrefs, and set *lock_p to point
780 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
781 * and type similarly to read_raw_ref().
783 * The caller must verify that refname is a "safe" reference name (in
784 * the sense of refname_is_safe()) before calling this function.
786 * If the reference doesn't already exist, verify that refname doesn't
787 * have a D/F conflict with any existing references. extras and skip
788 * are passed to refs_verify_refname_available() for this check.
790 * If mustexist is not set and the reference is not found or is
791 * broken, lock the reference anyway but clear sha1.
793 * Return 0 on success. On failure, write an error message to err and
794 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
796 * Implementation note: This function is basically
801 * but it includes a lot more code to
802 * - Deal with possible races with other processes
803 * - Avoid calling refs_verify_refname_available() when it can be
804 * avoided, namely if we were successfully able to read the ref
805 * - Generate informative error messages in the case of failure
807 static int lock_raw_ref(struct files_ref_store
*refs
,
808 const char *refname
, int mustexist
,
809 const struct string_list
*extras
,
810 const struct string_list
*skip
,
811 struct ref_lock
**lock_p
,
812 struct strbuf
*referent
,
816 struct ref_lock
*lock
;
817 struct strbuf ref_file
= STRBUF_INIT
;
818 int attempts_remaining
= 3;
819 int ret
= TRANSACTION_GENERIC_ERROR
;
822 files_assert_main_repository(refs
, "lock_raw_ref");
826 /* First lock the file so it can't change out from under us. */
828 *lock_p
= lock
= xcalloc(1, sizeof(*lock
));
830 lock
->ref_name
= xstrdup(refname
);
831 files_ref_path(refs
, &ref_file
, refname
);
834 switch (safe_create_leading_directories(ref_file
.buf
)) {
839 * Suppose refname is "refs/foo/bar". We just failed
840 * to create the containing directory, "refs/foo",
841 * because there was a non-directory in the way. This
842 * indicates a D/F conflict, probably because of
843 * another reference such as "refs/foo". There is no
844 * reason to expect this error to be transitory.
846 if (refs_verify_refname_available(&refs
->base
, refname
,
847 extras
, skip
, err
)) {
850 * To the user the relevant error is
851 * that the "mustexist" reference is
855 strbuf_addf(err
, "unable to resolve reference '%s'",
859 * The error message set by
860 * refs_verify_refname_available() is
863 ret
= TRANSACTION_NAME_CONFLICT
;
867 * The file that is in the way isn't a loose
868 * reference. Report it as a low-level
871 strbuf_addf(err
, "unable to create lock file %s.lock; "
872 "non-directory in the way",
877 /* Maybe another process was tidying up. Try again. */
878 if (--attempts_remaining
> 0)
882 strbuf_addf(err
, "unable to create directory for %s",
888 lock
->lk
= xcalloc(1, sizeof(struct lock_file
));
890 if (hold_lock_file_for_update(lock
->lk
, ref_file
.buf
, LOCK_NO_DEREF
) < 0) {
891 if (errno
== ENOENT
&& --attempts_remaining
> 0) {
893 * Maybe somebody just deleted one of the
894 * directories leading to ref_file. Try
899 unable_to_lock_message(ref_file
.buf
, errno
, err
);
905 * Now we hold the lock and can read the reference without
906 * fear that its value will change.
909 if (files_read_raw_ref(&refs
->base
, refname
,
910 lock
->old_oid
.hash
, referent
, type
)) {
911 if (errno
== ENOENT
) {
913 /* Garden variety missing reference. */
914 strbuf_addf(err
, "unable to resolve reference '%s'",
919 * Reference is missing, but that's OK. We
920 * know that there is not a conflict with
921 * another loose reference because
922 * (supposing that we are trying to lock
923 * reference "refs/foo/bar"):
925 * - We were successfully able to create
926 * the lockfile refs/foo/bar.lock, so we
927 * know there cannot be a loose reference
930 * - We got ENOENT and not EISDIR, so we
931 * know that there cannot be a loose
932 * reference named "refs/foo/bar/baz".
935 } else if (errno
== EISDIR
) {
937 * There is a directory in the way. It might have
938 * contained references that have been deleted. If
939 * we don't require that the reference already
940 * exists, try to remove the directory so that it
941 * doesn't cause trouble when we want to rename the
942 * lockfile into place later.
945 /* Garden variety missing reference. */
946 strbuf_addf(err
, "unable to resolve reference '%s'",
949 } else if (remove_dir_recursively(&ref_file
,
950 REMOVE_DIR_EMPTY_ONLY
)) {
951 if (refs_verify_refname_available(
952 &refs
->base
, refname
,
953 extras
, skip
, err
)) {
955 * The error message set by
956 * verify_refname_available() is OK.
958 ret
= TRANSACTION_NAME_CONFLICT
;
962 * We can't delete the directory,
963 * but we also don't know of any
964 * references that it should
967 strbuf_addf(err
, "there is a non-empty directory '%s' "
968 "blocking reference '%s'",
969 ref_file
.buf
, refname
);
973 } else if (errno
== EINVAL
&& (*type
& REF_ISBROKEN
)) {
974 strbuf_addf(err
, "unable to resolve reference '%s': "
975 "reference broken", refname
);
978 strbuf_addf(err
, "unable to resolve reference '%s': %s",
979 refname
, strerror(errno
));
984 * If the ref did not exist and we are creating it,
985 * make sure there is no existing ref that conflicts
988 if (refs_verify_refname_available(
989 &refs
->base
, refname
,
1002 strbuf_release(&ref_file
);
1006 static int files_peel_ref(struct ref_store
*ref_store
,
1007 const char *refname
, unsigned char *sha1
)
1009 struct files_ref_store
*refs
=
1010 files_downcast(ref_store
, REF_STORE_READ
| REF_STORE_ODB
,
1013 unsigned char base
[20];
1015 if (current_ref_iter
&& current_ref_iter
->refname
== refname
) {
1016 struct object_id peeled
;
1018 if (ref_iterator_peel(current_ref_iter
, &peeled
))
1020 hashcpy(sha1
, peeled
.hash
);
1024 if (refs_read_ref_full(ref_store
, refname
,
1025 RESOLVE_REF_READING
, base
, &flag
))
1029 * If the reference is packed, read its ref_entry from the
1030 * cache in the hope that we already know its peeled value.
1031 * We only try this optimization on packed references because
1032 * (a) forcing the filling of the loose reference cache could
1033 * be expensive and (b) loose references anyway usually do not
1034 * have REF_KNOWS_PEELED.
1036 if (flag
& REF_ISPACKED
) {
1037 struct ref_entry
*r
= get_packed_ref(refs
, refname
);
1039 if (peel_entry(r
, 0))
1041 hashcpy(sha1
, r
->u
.value
.peeled
.hash
);
1046 return peel_object(base
, sha1
);
1049 struct files_ref_iterator
{
1050 struct ref_iterator base
;
1052 struct packed_ref_cache
*packed_ref_cache
;
1053 struct ref_iterator
*iter0
;
1057 static int files_ref_iterator_advance(struct ref_iterator
*ref_iterator
)
1059 struct files_ref_iterator
*iter
=
1060 (struct files_ref_iterator
*)ref_iterator
;
1063 while ((ok
= ref_iterator_advance(iter
->iter0
)) == ITER_OK
) {
1064 if (iter
->flags
& DO_FOR_EACH_PER_WORKTREE_ONLY
&&
1065 ref_type(iter
->iter0
->refname
) != REF_TYPE_PER_WORKTREE
)
1068 if (!(iter
->flags
& DO_FOR_EACH_INCLUDE_BROKEN
) &&
1069 !ref_resolves_to_object(iter
->iter0
->refname
,
1071 iter
->iter0
->flags
))
1074 iter
->base
.refname
= iter
->iter0
->refname
;
1075 iter
->base
.oid
= iter
->iter0
->oid
;
1076 iter
->base
.flags
= iter
->iter0
->flags
;
1081 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
1087 static int files_ref_iterator_peel(struct ref_iterator
*ref_iterator
,
1088 struct object_id
*peeled
)
1090 struct files_ref_iterator
*iter
=
1091 (struct files_ref_iterator
*)ref_iterator
;
1093 return ref_iterator_peel(iter
->iter0
, peeled
);
1096 static int files_ref_iterator_abort(struct ref_iterator
*ref_iterator
)
1098 struct files_ref_iterator
*iter
=
1099 (struct files_ref_iterator
*)ref_iterator
;
1103 ok
= ref_iterator_abort(iter
->iter0
);
1105 release_packed_ref_cache(iter
->packed_ref_cache
);
1106 base_ref_iterator_free(ref_iterator
);
1110 static struct ref_iterator_vtable files_ref_iterator_vtable
= {
1111 files_ref_iterator_advance
,
1112 files_ref_iterator_peel
,
1113 files_ref_iterator_abort
1116 static struct ref_iterator
*files_ref_iterator_begin(
1117 struct ref_store
*ref_store
,
1118 const char *prefix
, unsigned int flags
)
1120 struct files_ref_store
*refs
;
1121 struct ref_iterator
*loose_iter
, *packed_iter
;
1122 struct files_ref_iterator
*iter
;
1123 struct ref_iterator
*ref_iterator
;
1124 unsigned int required_flags
= REF_STORE_READ
;
1126 if (!(flags
& DO_FOR_EACH_INCLUDE_BROKEN
))
1127 required_flags
|= REF_STORE_ODB
;
1129 refs
= files_downcast(ref_store
, required_flags
, "ref_iterator_begin");
1131 iter
= xcalloc(1, sizeof(*iter
));
1132 ref_iterator
= &iter
->base
;
1133 base_ref_iterator_init(ref_iterator
, &files_ref_iterator_vtable
);
1136 * We must make sure that all loose refs are read before
1137 * accessing the packed-refs file; this avoids a race
1138 * condition if loose refs are migrated to the packed-refs
1139 * file by a simultaneous process, but our in-memory view is
1140 * from before the migration. We ensure this as follows:
1141 * First, we call start the loose refs iteration with its
1142 * `prime_ref` argument set to true. This causes the loose
1143 * references in the subtree to be pre-read into the cache.
1144 * (If they've already been read, that's OK; we only need to
1145 * guarantee that they're read before the packed refs, not
1146 * *how much* before.) After that, we call
1147 * get_packed_ref_cache(), which internally checks whether the
1148 * packed-ref cache is up to date with what is on disk, and
1149 * re-reads it if not.
1152 loose_iter
= cache_ref_iterator_begin(get_loose_ref_cache(refs
),
1155 iter
->packed_ref_cache
= get_packed_ref_cache(refs
);
1156 acquire_packed_ref_cache(iter
->packed_ref_cache
);
1157 packed_iter
= cache_ref_iterator_begin(iter
->packed_ref_cache
->cache
,
1160 iter
->iter0
= overlay_ref_iterator_begin(loose_iter
, packed_iter
);
1161 iter
->flags
= flags
;
1163 return ref_iterator
;
1167 * Verify that the reference locked by lock has the value old_sha1.
1168 * Fail if the reference doesn't exist and mustexist is set. Return 0
1169 * on success. On error, write an error message to err, set errno, and
1170 * return a negative value.
1172 static int verify_lock(struct ref_store
*ref_store
, struct ref_lock
*lock
,
1173 const unsigned char *old_sha1
, int mustexist
,
1178 if (refs_read_ref_full(ref_store
, lock
->ref_name
,
1179 mustexist
? RESOLVE_REF_READING
: 0,
1180 lock
->old_oid
.hash
, NULL
)) {
1182 int save_errno
= errno
;
1183 strbuf_addf(err
, "can't verify ref '%s'", lock
->ref_name
);
1187 oidclr(&lock
->old_oid
);
1191 if (old_sha1
&& hashcmp(lock
->old_oid
.hash
, old_sha1
)) {
1192 strbuf_addf(err
, "ref '%s' is at %s but expected %s",
1194 oid_to_hex(&lock
->old_oid
),
1195 sha1_to_hex(old_sha1
));
1202 static int remove_empty_directories(struct strbuf
*path
)
1205 * we want to create a file but there is a directory there;
1206 * if that is an empty directory (or a directory that contains
1207 * only empty directories), remove them.
1209 return remove_dir_recursively(path
, REMOVE_DIR_EMPTY_ONLY
);
1212 static int create_reflock(const char *path
, void *cb
)
1214 struct lock_file
*lk
= cb
;
1216 return hold_lock_file_for_update(lk
, path
, LOCK_NO_DEREF
) < 0 ? -1 : 0;
1220 * Locks a ref returning the lock on success and NULL on failure.
1221 * On failure errno is set to something meaningful.
1223 static struct ref_lock
*lock_ref_sha1_basic(struct files_ref_store
*refs
,
1224 const char *refname
,
1225 const unsigned char *old_sha1
,
1226 const struct string_list
*extras
,
1227 const struct string_list
*skip
,
1228 unsigned int flags
, int *type
,
1231 struct strbuf ref_file
= STRBUF_INIT
;
1232 struct ref_lock
*lock
;
1234 int mustexist
= (old_sha1
&& !is_null_sha1(old_sha1
));
1235 int resolve_flags
= RESOLVE_REF_NO_RECURSE
;
1238 files_assert_main_repository(refs
, "lock_ref_sha1_basic");
1241 lock
= xcalloc(1, sizeof(struct ref_lock
));
1244 resolve_flags
|= RESOLVE_REF_READING
;
1245 if (flags
& REF_DELETING
)
1246 resolve_flags
|= RESOLVE_REF_ALLOW_BAD_NAME
;
1248 files_ref_path(refs
, &ref_file
, refname
);
1249 resolved
= !!refs_resolve_ref_unsafe(&refs
->base
,
1250 refname
, resolve_flags
,
1251 lock
->old_oid
.hash
, type
);
1252 if (!resolved
&& errno
== EISDIR
) {
1254 * we are trying to lock foo but we used to
1255 * have foo/bar which now does not exist;
1256 * it is normal for the empty directory 'foo'
1259 if (remove_empty_directories(&ref_file
)) {
1261 if (!refs_verify_refname_available(
1263 refname
, extras
, skip
, err
))
1264 strbuf_addf(err
, "there are still refs under '%s'",
1268 resolved
= !!refs_resolve_ref_unsafe(&refs
->base
,
1269 refname
, resolve_flags
,
1270 lock
->old_oid
.hash
, type
);
1274 if (last_errno
!= ENOTDIR
||
1275 !refs_verify_refname_available(&refs
->base
, refname
,
1277 strbuf_addf(err
, "unable to resolve reference '%s': %s",
1278 refname
, strerror(last_errno
));
1284 * If the ref did not exist and we are creating it, make sure
1285 * there is no existing packed ref whose name begins with our
1286 * refname, nor a packed ref whose name is a proper prefix of
1289 if (is_null_oid(&lock
->old_oid
) &&
1290 refs_verify_refname_available(&refs
->base
, refname
,
1291 extras
, skip
, err
)) {
1292 last_errno
= ENOTDIR
;
1296 lock
->lk
= xcalloc(1, sizeof(struct lock_file
));
1298 lock
->ref_name
= xstrdup(refname
);
1300 if (raceproof_create_file(ref_file
.buf
, create_reflock
, lock
->lk
)) {
1302 unable_to_lock_message(ref_file
.buf
, errno
, err
);
1306 if (verify_lock(&refs
->base
, lock
, old_sha1
, mustexist
, err
)) {
1317 strbuf_release(&ref_file
);
1323 * Write an entry to the packed-refs file for the specified refname.
1324 * If peeled is non-NULL, write it as the entry's peeled value.
/*
 * NOTE(review): emits one "<hex-sha1> SP <refname> LF" record; the
 * peeled value, when supplied, goes on its own "^<hex-sha1>" line.
 */
1326 static void write_packed_entry(FILE *fh
, const char *refname
,
1327 const unsigned char *sha1
,
1328 const unsigned char *peeled
)
1330 fprintf_or_die(fh
, "%s %s\n", sha1_to_hex(sha1
), refname
);
/* "^<hex>" peeled line -- per the header comment, only when peeled != NULL */
1332 fprintf_or_die(fh
, "^%s\n", sha1_to_hex(peeled
));
1336 * Lock the packed-refs file for writing. Flags is passed to
1337 * hold_lock_file_for_update(). Return 0 on success. On errors, set
1338 * errno appropriately and return a nonzero value.
1340 static int lock_packed_refs(struct files_ref_store
*refs
, int flags
)
/*
 * Lock timeout (ms) comes from "core.packedrefstimeout"; it is read
 * once per process and cached in these function-local statics.
 */
1342 static int timeout_configured
= 0;
1343 static int timeout_value
= 1000;
1344 struct packed_ref_cache
*packed_ref_cache
;
1346 files_assert_main_repository(refs
, "lock_packed_refs");
1348 if (!timeout_configured
) {
1349 git_config_get_int("core.packedrefstimeout", &timeout_value
);
1350 timeout_configured
= 1;
/* Take the lock on the packed-refs file itself, honoring the timeout. */
1353 if (hold_lock_file_for_update_timeout(
1354 &refs
->packed_ref_store
->lock
,
1355 refs
->packed_ref_store
->path
,
1356 flags
, timeout_value
) < 0)
1360 * Now that we hold the `packed-refs` lock, make sure that our
1361 * cache matches the current version of the file. Normally
1362 * `get_packed_ref_cache()` does that for us, but that
1363 * function assumes that when the file is locked, any existing
1364 * cache is still valid. We've just locked the file, but it
1365 * might have changed the moment *before* we locked it.
1367 validate_packed_ref_cache(refs
);
1369 packed_ref_cache
= get_packed_ref_cache(refs
);
1370 /* Increment the reference count to prevent it from being freed: */
1371 acquire_packed_ref_cache(packed_ref_cache
);
1376 * Write the current version of the packed refs cache from memory to
1377 * disk. The packed-refs file must already be locked for writing (see
1378 * lock_packed_refs()). Return zero on success. On errors, set errno
1379 * and return a nonzero value
1381 static int commit_packed_refs(struct files_ref_store
*refs
)
1383 struct packed_ref_cache
*packed_ref_cache
=
1384 get_packed_ref_cache(refs
);
1388 struct ref_iterator
*iter
;
1390 files_assert_main_repository(refs
, "commit_packed_refs");
/* Caller must have called lock_packed_refs() first. */
1392 if (!is_lock_file_locked(&refs
->packed_ref_store
->lock
))
1393 die("BUG: packed-refs not locked");
/* Stream the new file contents through the lockfile's FILE handle. */
1395 out
= fdopen_lock_file(&refs
->packed_ref_store
->lock
, "w");
1397 die_errno("unable to fdopen packed-refs descriptor");
1399 fprintf_or_die(out
, "%s", PACKED_REFS_HEADER
);
/* Walk the in-memory cache and emit one packed entry per ref. */
1401 iter
= cache_ref_iterator_begin(packed_ref_cache
->cache
, NULL
, 0);
1402 while ((ok
= ref_iterator_advance(iter
)) == ITER_OK
) {
1403 struct object_id peeled
;
1404 int peel_error
= ref_iterator_peel(iter
, &peeled
);
/* A failed peel just means "no peeled line" for this entry. */
1406 write_packed_entry(out
, iter
->refname
, iter
->oid
->hash
,
1407 peel_error
? NULL
: peeled
.hash
);
1410 if (ok
!= ITER_DONE
)
1411 die("error while iterating over references");
/* Atomically rename the lockfile into place as "packed-refs". */
1413 if (commit_lock_file(&refs
->packed_ref_store
->lock
)) {
/* Drop the reference taken by lock_packed_refs(). */
1417 release_packed_ref_cache(packed_ref_cache
);
1423 * Rollback the lockfile for the packed-refs file, and discard the
1424 * in-memory packed reference cache. (The packed-refs file will be
1425 * read anew if it is needed again after this function is called.)
1427 static void rollback_packed_refs(struct files_ref_store
*refs
)
1429 struct packed_ref_cache
*packed_ref_cache
=
1430 get_packed_ref_cache(refs
);
1432 files_assert_main_repository(refs
, "rollback_packed_refs");
/* Only legal while the packed-refs lock is held. */
1434 if (!is_lock_file_locked(&refs
->packed_ref_store
->lock
))
1435 die("BUG: packed-refs not locked");
/* Discard the lockfile without renaming it into place... */
1436 rollback_lock_file(&refs
->packed_ref_store
->lock
);
/* ...drop our reference, and invalidate the cache so it is re-read. */
1437 release_packed_ref_cache(packed_ref_cache
);
1438 clear_packed_ref_cache(refs
->packed_ref_store
);
/*
 * Singly-linked list node recording a loose ref scheduled for pruning
 * after it has been packed (see files_pack_refs()/prune_ref()).
 */
1441 struct ref_to_prune
{
1442 struct ref_to_prune
*next
;
/* Value the ref had when it was packed; checked again before unlink. */
1443 unsigned char sha1
[20];
/* Refname, stored inline as a flexible array member. */
1444 char name
[FLEX_ARRAY
];
/*
 * Bit flags for try_remove_empty_parents(): clean up empty parent
 * directories of the ref itself and/or of its reflog.
 */
1448 REMOVE_EMPTY_PARENTS_REF
= 0x01,
1449 REMOVE_EMPTY_PARENTS_REFLOG
= 0x02
1453 * Remove empty parent directories associated with the specified
1454 * reference and/or its reflog, but spare [logs/]refs/ and immediate
1455 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
1456 * REMOVE_EMPTY_PARENTS_REFLOG.
1458 static void try_remove_empty_parents(struct files_ref_store
*refs
,
1459 const char *refname
,
1462 struct strbuf buf
= STRBUF_INIT
;
1463 struct strbuf sb
= STRBUF_INIT
;
1467 strbuf_addstr(&buf
, refname
);
1469 for (i
= 0; i
< 2; i
++) { /* refs/{heads,tags,...}/ */
1470 while (*p
&& *p
!= '/')
1472 /* tolerate duplicate slashes; see check_refname_format() */
1476 q
= buf
.buf
+ buf
.len
;
1477 while (flags
& (REMOVE_EMPTY_PARENTS_REF
| REMOVE_EMPTY_PARENTS_REFLOG
)) {
1478 while (q
> p
&& *q
!= '/')
1480 while (q
> p
&& *(q
-1) == '/')
1484 strbuf_setlen(&buf
, q
- buf
.buf
);
1487 files_ref_path(refs
, &sb
, buf
.buf
);
1488 if ((flags
& REMOVE_EMPTY_PARENTS_REF
) && rmdir(sb
.buf
))
1489 flags
&= ~REMOVE_EMPTY_PARENTS_REF
;
1492 files_reflog_path(refs
, &sb
, buf
.buf
);
1493 if ((flags
& REMOVE_EMPTY_PARENTS_REFLOG
) && rmdir(sb
.buf
))
1494 flags
&= ~REMOVE_EMPTY_PARENTS_REFLOG
;
1496 strbuf_release(&buf
);
1497 strbuf_release(&sb
);
1500 /* make sure nobody touched the ref, and unlink */
/*
 * Delete one loose ref that was just packed, going through a regular
 * ref transaction (REF_ISPRUNING) so the deletion is race-safe.
 */
1501 static void prune_ref(struct files_ref_store
*refs
, struct ref_to_prune
*r
)
1503 struct ref_transaction
*transaction
;
1504 struct strbuf err
= STRBUF_INIT
;
/* Don't touch refs with broken names. */
1506 if (check_refname_format(r
->name
, 0))
1509 transaction
= ref_store_transaction_begin(&refs
->base
, &err
);
/* Delete only if the ref still has the value we packed (r->sha1). */
1511 ref_transaction_delete(transaction
, r
->name
, r
->sha1
,
1512 REF_ISPRUNING
| REF_NODEREF
, NULL
, &err
) ||
1513 ref_transaction_commit(transaction
, &err
)) {
/* On failure: report, clean up, and keep going (best-effort prune). */
1514 ref_transaction_free(transaction
);
1515 error("%s", err
.buf
);
1516 strbuf_release(&err
);
1519 ref_transaction_free(transaction
);
1520 strbuf_release(&err
);
1523 static void prune_refs(struct files_ref_store
*refs
, struct ref_to_prune
*r
)
1532 * Return true if the specified reference should be packed.
/*
 * NOTE(review): all four checks below are exclusions; a ref is packed
 * only if none of them fires.
 */
1534 static int should_pack_ref(const char *refname
,
1535 const struct object_id
*oid
, unsigned int ref_flags
,
1536 unsigned int pack_flags
)
1538 /* Do not pack per-worktree refs: */
1539 if (ref_type(refname
) != REF_TYPE_NORMAL
)
1542 /* Do not pack non-tags unless PACK_REFS_ALL is set: */
1543 if (!(pack_flags
& PACK_REFS_ALL
) && !starts_with(refname
, "refs/tags/"))
1546 /* Do not pack symbolic refs: */
1547 if (ref_flags
& REF_ISSYMREF
)
1550 /* Do not pack broken refs: */
1551 if (!ref_resolves_to_object(refname
, oid
, ref_flags
))
1557 static int files_pack_refs(struct ref_store
*ref_store
, unsigned int flags
)
1559 struct files_ref_store
*refs
=
1560 files_downcast(ref_store
, REF_STORE_WRITE
| REF_STORE_ODB
,
1562 struct ref_iterator
*iter
;
1564 struct ref_to_prune
*refs_to_prune
= NULL
;
1566 lock_packed_refs(refs
, LOCK_DIE_ON_ERROR
);
1568 iter
= cache_ref_iterator_begin(get_loose_ref_cache(refs
), NULL
, 0);
1569 while ((ok
= ref_iterator_advance(iter
)) == ITER_OK
) {
1571 * If the loose reference can be packed, add an entry
1572 * in the packed ref cache. If the reference should be
1573 * pruned, also add it to refs_to_prune.
1575 if (!should_pack_ref(iter
->refname
, iter
->oid
, iter
->flags
,
1580 * Create an entry in the packed-refs cache equivalent
1581 * to the one from the loose ref cache, except that
1582 * we don't copy the peeled status, because we want it
1585 add_packed_ref(refs
, iter
->refname
, iter
->oid
);
1587 /* Schedule the loose reference for pruning if requested. */
1588 if ((flags
& PACK_REFS_PRUNE
)) {
1589 struct ref_to_prune
*n
;
1590 FLEX_ALLOC_STR(n
, name
, iter
->refname
);
1591 hashcpy(n
->sha1
, iter
->oid
->hash
);
1592 n
->next
= refs_to_prune
;
1596 if (ok
!= ITER_DONE
)
1597 die("error while iterating over references");
1599 if (commit_packed_refs(refs
))
1600 die_errno("unable to overwrite old ref-pack file");
1602 prune_refs(refs
, refs_to_prune
);
1607 * Rewrite the packed-refs file, omitting any refs listed in
1608 * 'refnames'. On error, leave packed-refs unchanged, write an error
1609 * message to 'err', and return a nonzero value.
1611 * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
1613 static int repack_without_refs(struct files_ref_store
*refs
,
1614 struct string_list
*refnames
, struct strbuf
*err
)
1616 struct ref_dir
*packed
;
1617 struct string_list_item
*refname
;
1618 int ret
, needs_repacking
= 0, removed
= 0;
1620 files_assert_main_repository(refs
, "repack_without_refs");
1623 /* Look for a packed ref */
1624 for_each_string_list_item(refname
, refnames
) {
1625 if (get_packed_ref(refs
, refname
->string
)) {
1626 needs_repacking
= 1;
1631 /* Avoid locking if we have nothing to do */
1632 if (!needs_repacking
)
1633 return 0; /* no refname exists in packed refs */
1635 if (lock_packed_refs(refs
, 0)) {
1636 unable_to_lock_message(refs
->packed_ref_store
->path
, errno
, err
);
1639 packed
= get_packed_refs(refs
);
1641 /* Remove refnames from the cache */
1642 for_each_string_list_item(refname
, refnames
)
1643 if (remove_entry_from_dir(packed
, refname
->string
) != -1)
1647 * All packed entries disappeared while we were
1648 * acquiring the lock.
1650 rollback_packed_refs(refs
);
1654 /* Write what remains */
1655 ret
= commit_packed_refs(refs
);
1657 strbuf_addf(err
, "unable to overwrite old ref-pack file: %s",
1662 static int files_delete_refs(struct ref_store
*ref_store
, const char *msg
,
1663 struct string_list
*refnames
, unsigned int flags
)
1665 struct files_ref_store
*refs
=
1666 files_downcast(ref_store
, REF_STORE_WRITE
, "delete_refs");
1667 struct strbuf err
= STRBUF_INIT
;
1673 result
= repack_without_refs(refs
, refnames
, &err
);
1676 * If we failed to rewrite the packed-refs file, then
1677 * it is unsafe to try to remove loose refs, because
1678 * doing so might expose an obsolete packed value for
1679 * a reference that might even point at an object that
1680 * has been garbage collected.
1682 if (refnames
->nr
== 1)
1683 error(_("could not delete reference %s: %s"),
1684 refnames
->items
[0].string
, err
.buf
);
1686 error(_("could not delete references: %s"), err
.buf
);
1691 for (i
= 0; i
< refnames
->nr
; i
++) {
1692 const char *refname
= refnames
->items
[i
].string
;
1694 if (refs_delete_ref(&refs
->base
, msg
, refname
, NULL
, flags
))
1695 result
|= error(_("could not remove reference %s"), refname
);
1699 strbuf_release(&err
);
1704 * People using contrib's git-new-workdir have .git/logs/refs ->
1705 * /some/other/path/.git/logs/refs, and that may live on another device.
1707 * IOW, to avoid cross device rename errors, the temporary renamed log must
1708 * live into logs/refs.
1710 #define TMP_RENAMED_LOG "refs/.tmp-renamed-log"
/* Callback state for rename_tmp_log_callback(): source path to rename from. */
1713 const char *tmp_renamed_log
;
/*
 * raceproof_create_file() callback: move the stashed temporary reflog
 * into its final location at `path`.
 */
1717 static int rename_tmp_log_callback(const char *path
, void *cb_data
)
1719 struct rename_cb
*cb
= cb_data
;
1721 if (rename(cb
->tmp_renamed_log
, path
)) {
1723 * rename(a, b) when b is an existing directory ought
1724 * to result in ISDIR, but Solaris 5.8 gives ENOTDIR.
1725 * Sheesh. Record the true errno for error reporting,
1726 * but report EISDIR to raceproof_create_file() so
1727 * that it knows to retry.
1729 cb
->true_errno
= errno
;
1730 if (errno
== ENOTDIR
)
/*
 * Move the reflog stashed at TMP_RENAMED_LOG to the reflog location
 * for `newrefname`, creating any needed parent directories via
 * raceproof_create_file(). Returns nonzero (with an error message)
 * on failure.
 */
1738 static int rename_tmp_log(struct files_ref_store
*refs
, const char *newrefname
)
1740 struct strbuf path
= STRBUF_INIT
;
1741 struct strbuf tmp
= STRBUF_INIT
;
1742 struct rename_cb cb
;
1745 files_reflog_path(refs
, &path
, newrefname
);
1746 files_reflog_path(refs
, &tmp
, TMP_RENAMED_LOG
);
1747 cb
.tmp_renamed_log
= tmp
.buf
;
1748 ret
= raceproof_create_file(path
.buf
, rename_tmp_log_callback
, &cb
);
/* EISDIR from the callback means the destination directory wasn't empty. */
1750 if (errno
== EISDIR
)
1751 error("directory not empty: %s", path
.buf
);
/* Otherwise report the real errno the callback recorded. */
1753 error("unable to move logfile %s to %s: %s",
1755 strerror(cb
.true_errno
));
1758 strbuf_release(&path
);
1759 strbuf_release(&tmp
);
1763 static int write_ref_to_lockfile(struct ref_lock
*lock
,
1764 const struct object_id
*oid
, struct strbuf
*err
);
1765 static int commit_ref_update(struct files_ref_store
*refs
,
1766 struct ref_lock
*lock
,
1767 const struct object_id
*oid
, const char *logmsg
,
1768 struct strbuf
*err
);
1770 static int files_rename_ref(struct ref_store
*ref_store
,
1771 const char *oldrefname
, const char *newrefname
,
1774 struct files_ref_store
*refs
=
1775 files_downcast(ref_store
, REF_STORE_WRITE
, "rename_ref");
1776 struct object_id oid
, orig_oid
;
1777 int flag
= 0, logmoved
= 0;
1778 struct ref_lock
*lock
;
1779 struct stat loginfo
;
1780 struct strbuf sb_oldref
= STRBUF_INIT
;
1781 struct strbuf sb_newref
= STRBUF_INIT
;
1782 struct strbuf tmp_renamed_log
= STRBUF_INIT
;
1784 struct strbuf err
= STRBUF_INIT
;
1786 files_reflog_path(refs
, &sb_oldref
, oldrefname
);
1787 files_reflog_path(refs
, &sb_newref
, newrefname
);
1788 files_reflog_path(refs
, &tmp_renamed_log
, TMP_RENAMED_LOG
);
1790 log
= !lstat(sb_oldref
.buf
, &loginfo
);
1791 if (log
&& S_ISLNK(loginfo
.st_mode
)) {
1792 ret
= error("reflog for %s is a symlink", oldrefname
);
1796 if (!refs_resolve_ref_unsafe(&refs
->base
, oldrefname
,
1797 RESOLVE_REF_READING
| RESOLVE_REF_NO_RECURSE
,
1798 orig_oid
.hash
, &flag
)) {
1799 ret
= error("refname %s not found", oldrefname
);
1803 if (flag
& REF_ISSYMREF
) {
1804 ret
= error("refname %s is a symbolic ref, renaming it is not supported",
1808 if (!refs_rename_ref_available(&refs
->base
, oldrefname
, newrefname
)) {
1813 if (log
&& rename(sb_oldref
.buf
, tmp_renamed_log
.buf
)) {
1814 ret
= error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG
": %s",
1815 oldrefname
, strerror(errno
));
1819 if (refs_delete_ref(&refs
->base
, logmsg
, oldrefname
,
1820 orig_oid
.hash
, REF_NODEREF
)) {
1821 error("unable to delete old %s", oldrefname
);
1826 * Since we are doing a shallow lookup, oid is not the
1827 * correct value to pass to delete_ref as old_oid. But that
1828 * doesn't matter, because an old_oid check wouldn't add to
1829 * the safety anyway; we want to delete the reference whatever
1830 * its current value.
1832 if (!refs_read_ref_full(&refs
->base
, newrefname
,
1833 RESOLVE_REF_READING
| RESOLVE_REF_NO_RECURSE
,
1835 refs_delete_ref(&refs
->base
, NULL
, newrefname
,
1836 NULL
, REF_NODEREF
)) {
1837 if (errno
== EISDIR
) {
1838 struct strbuf path
= STRBUF_INIT
;
1841 files_ref_path(refs
, &path
, newrefname
);
1842 result
= remove_empty_directories(&path
);
1843 strbuf_release(&path
);
1846 error("Directory not empty: %s", newrefname
);
1850 error("unable to delete existing %s", newrefname
);
1855 if (log
&& rename_tmp_log(refs
, newrefname
))
1860 lock
= lock_ref_sha1_basic(refs
, newrefname
, NULL
, NULL
, NULL
,
1861 REF_NODEREF
, NULL
, &err
);
1863 error("unable to rename '%s' to '%s': %s", oldrefname
, newrefname
, err
.buf
);
1864 strbuf_release(&err
);
1867 oidcpy(&lock
->old_oid
, &orig_oid
);
1869 if (write_ref_to_lockfile(lock
, &orig_oid
, &err
) ||
1870 commit_ref_update(refs
, lock
, &orig_oid
, logmsg
, &err
)) {
1871 error("unable to write current sha1 into %s: %s", newrefname
, err
.buf
);
1872 strbuf_release(&err
);
1880 lock
= lock_ref_sha1_basic(refs
, oldrefname
, NULL
, NULL
, NULL
,
1881 REF_NODEREF
, NULL
, &err
);
1883 error("unable to lock %s for rollback: %s", oldrefname
, err
.buf
);
1884 strbuf_release(&err
);
1888 flag
= log_all_ref_updates
;
1889 log_all_ref_updates
= LOG_REFS_NONE
;
1890 if (write_ref_to_lockfile(lock
, &orig_oid
, &err
) ||
1891 commit_ref_update(refs
, lock
, &orig_oid
, NULL
, &err
)) {
1892 error("unable to write current sha1 into %s: %s", oldrefname
, err
.buf
);
1893 strbuf_release(&err
);
1895 log_all_ref_updates
= flag
;
1898 if (logmoved
&& rename(sb_newref
.buf
, sb_oldref
.buf
))
1899 error("unable to restore logfile %s from %s: %s",
1900 oldrefname
, newrefname
, strerror(errno
));
1901 if (!logmoved
&& log
&&
1902 rename(tmp_renamed_log
.buf
, sb_oldref
.buf
))
1903 error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG
": %s",
1904 oldrefname
, strerror(errno
));
1907 strbuf_release(&sb_newref
);
1908 strbuf_release(&sb_oldref
);
1909 strbuf_release(&tmp_renamed_log
);
/*
 * Close the file descriptor of a held ref lock without committing it;
 * the lock itself remains held.
 */
1914 static int close_ref(struct ref_lock
*lock
)
1916 if (close_lock_file(lock
->lk
))
/*
 * Commit a held ref lock: rename the lockfile onto the ref's final
 * path. Clears out an empty directory squatting on that path first.
 */
1921 static int commit_ref(struct ref_lock
*lock
)
1923 char *path
= get_locked_file_path(lock
->lk
)
;
1926 if (!lstat(path
, &st
) && S_ISDIR(st
.st_mode
)) {
1928 * There is a directory at the path we want to rename
1929 * the lockfile to. Hopefully it is empty; try to
1932 size_t len
= strlen(path
);
1933 struct strbuf sb_path
= STRBUF_INIT
;
/* strbuf takes ownership of `path` here; released just below. */
1935 strbuf_attach(&sb_path
, path
, len
, len
);
1938 * If this fails, commit_lock_file() will also fail
1939 * and will report the problem.
1941 remove_empty_directories(&sb_path
);
1942 strbuf_release(&sb_path
);
1947 if (commit_lock_file(lock
->lk
))
/*
 * raceproof_create_file() callback: open `path` for appending,
 * creating it if needed. `cb` points at an int that receives the fd.
 * Returns 0 on success, -1 on failure (errno set by open()).
 */
1952 static int open_or_create_logfile(const char *path
, void *cb
)
1956 *fd
= open(path
, O_APPEND
| O_WRONLY
| O_CREAT
, 0666);
1957 return (*fd
< 0) ? -1 : 0;
1961 * Create a reflog for a ref. If force_create = 0, only create the
1962 * reflog for certain refs (those for which should_autocreate_reflog
1963 * returns non-zero). Otherwise, create it regardless of the reference
1964 * name. If the logfile already existed or was created, return 0 and
1965 * set *logfd to the file descriptor opened for appending to the file.
1966 * If no logfile exists and we decided not to create one, return 0 and
1967 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
1970 static int log_ref_setup(struct files_ref_store
*refs
,
1971 const char *refname
, int force_create
,
1972 int *logfd
, struct strbuf
*err
)
1974 struct strbuf logfile_sb
= STRBUF_INIT
;
1977 files_reflog_path(refs
, &logfile_sb
, refname
);
1978 logfile
= strbuf_detach(&logfile_sb
, NULL
);
1980 if (force_create
|| should_autocreate_reflog(refname
)) {
1981 if (raceproof_create_file(logfile
, open_or_create_logfile
, logfd
)) {
1982 if (errno
== ENOENT
)
1983 strbuf_addf(err
, "unable to create directory for '%s': "
1984 "%s", logfile
, strerror(errno
));
1985 else if (errno
== EISDIR
)
1986 strbuf_addf(err
, "there are still logs under '%s'",
1989 strbuf_addf(err
, "unable to append to '%s': %s",
1990 logfile
, strerror(errno
));
1995 *logfd
= open(logfile
, O_APPEND
| O_WRONLY
, 0666);
1997 if (errno
== ENOENT
|| errno
== EISDIR
) {
1999 * The logfile doesn't already exist,
2000 * but that is not an error; it only
2001 * means that we won't write log
2006 strbuf_addf(err
, "unable to append to '%s': %s",
2007 logfile
, strerror(errno
));
2014 adjust_shared_perm(logfile
);
/*
 * ref_store method: create a reflog for `refname` (unconditionally if
 * force_create). Thin wrapper around log_ref_setup().
 */
2024 static int files_create_reflog(struct ref_store
*ref_store
,
2025 const char *refname
, int force_create
,
2028 struct files_ref_store
*refs
=
2029 files_downcast(ref_store
, REF_STORE_WRITE
, "create_reflog");
/* log_ref_setup() opens (or creates) the logfile and hands back `fd`. */
2032 if (log_ref_setup(refs
, refname
, force_create
, &fd
, err
))
2041 static int log_ref_write_fd(int fd
, const struct object_id
*old_oid
,
2042 const struct object_id
*new_oid
,
2043 const char *committer
, const char *msg
)
2045 int msglen
, written
;
2046 unsigned maxlen
, len
;
2049 msglen
= msg
? strlen(msg
) : 0;
2050 maxlen
= strlen(committer
) + msglen
+ 100;
2051 logrec
= xmalloc(maxlen
);
2052 len
= xsnprintf(logrec
, maxlen
, "%s %s %s\n",
2053 oid_to_hex(old_oid
),
2054 oid_to_hex(new_oid
),
2057 len
+= copy_reflog_msg(logrec
+ len
- 1, msg
) - 1;
2059 written
= len
<= maxlen
? write_in_full(fd
, logrec
, len
) : -1;
2067 static int files_log_ref_write(struct files_ref_store
*refs
,
2068 const char *refname
, const struct object_id
*old_oid
,
2069 const struct object_id
*new_oid
, const char *msg
,
2070 int flags
, struct strbuf
*err
)
2074 if (log_all_ref_updates
== LOG_REFS_UNSET
)
2075 log_all_ref_updates
= is_bare_repository() ? LOG_REFS_NONE
: LOG_REFS_NORMAL
;
2077 result
= log_ref_setup(refs
, refname
,
2078 flags
& REF_FORCE_CREATE_REFLOG
,
2086 result
= log_ref_write_fd(logfd
, old_oid
, new_oid
,
2087 git_committer_info(0), msg
);
2089 struct strbuf sb
= STRBUF_INIT
;
2090 int save_errno
= errno
;
2092 files_reflog_path(refs
, &sb
, refname
);
2093 strbuf_addf(err
, "unable to append to '%s': %s",
2094 sb
.buf
, strerror(save_errno
));
2095 strbuf_release(&sb
);
2100 struct strbuf sb
= STRBUF_INIT
;
2101 int save_errno
= errno
;
2103 files_reflog_path(refs
, &sb
, refname
);
2104 strbuf_addf(err
, "unable to append to '%s': %s",
2105 sb
.buf
, strerror(save_errno
));
2106 strbuf_release(&sb
);
2113 * Write sha1 into the open lockfile, then close the lockfile. On
2114 * errors, rollback the lockfile, fill in *err and
2117 static int write_ref_to_lockfile(struct ref_lock
*lock
,
2118 const struct object_id
*oid
, struct strbuf
*err
)
2120 static char term
= '\n';
2124 o
= parse_object(oid
);
2127 "trying to write ref '%s' with nonexistent object %s",
2128 lock
->ref_name
, oid_to_hex(oid
));
2132 if (o
->type
!= OBJ_COMMIT
&& is_branch(lock
->ref_name
)) {
2134 "trying to write non-commit object %s to branch '%s'",
2135 oid_to_hex(oid
), lock
->ref_name
);
2139 fd
= get_lock_file_fd(lock
->lk
);
2140 if (write_in_full(fd
, oid_to_hex(oid
), GIT_SHA1_HEXSZ
) != GIT_SHA1_HEXSZ
||
2141 write_in_full(fd
, &term
, 1) != 1 ||
2142 close_ref(lock
) < 0) {
2144 "couldn't write '%s'", get_lock_file_path(lock
->lk
));
2152 * Commit a change to a loose reference that has already been written
2153 * to the loose reference lockfile. Also update the reflogs if
2154 * necessary, using the specified lockmsg (which can be NULL).
2156 static int commit_ref_update(struct files_ref_store
*refs
,
2157 struct ref_lock
*lock
,
2158 const struct object_id
*oid
, const char *logmsg
,
2161 files_assert_main_repository(refs
, "commit_ref_update");
2163 clear_loose_ref_cache(refs
);
2164 if (files_log_ref_write(refs
, lock
->ref_name
,
2165 &lock
->old_oid
, oid
,
2167 char *old_msg
= strbuf_detach(err
, NULL
);
2168 strbuf_addf(err
, "cannot update the ref '%s': %s",
2169 lock
->ref_name
, old_msg
);
2175 if (strcmp(lock
->ref_name
, "HEAD") != 0) {
2177 * Special hack: If a branch is updated directly and HEAD
2178 * points to it (may happen on the remote side of a push
2179 * for example) then logically the HEAD reflog should be
2181 * A generic solution implies reverse symref information,
2182 * but finding all symrefs pointing to the given branch
2183 * would be rather costly for this rare event (the direct
2184 * update of a branch) to be worth it. So let's cheat and
2185 * check with HEAD only which should cover 99% of all usage
2186 * scenarios (even 100% of the default ones).
2188 struct object_id head_oid
;
2190 const char *head_ref
;
2192 head_ref
= refs_resolve_ref_unsafe(&refs
->base
, "HEAD",
2193 RESOLVE_REF_READING
,
2194 head_oid
.hash
, &head_flag
);
2195 if (head_ref
&& (head_flag
& REF_ISSYMREF
) &&
2196 !strcmp(head_ref
, lock
->ref_name
)) {
2197 struct strbuf log_err
= STRBUF_INIT
;
2198 if (files_log_ref_write(refs
, "HEAD",
2199 &lock
->old_oid
, oid
,
2200 logmsg
, 0, &log_err
)) {
2201 error("%s", log_err
.buf
);
2202 strbuf_release(&log_err
);
2207 if (commit_ref(lock
)) {
2208 strbuf_addf(err
, "couldn't set '%s'", lock
->ref_name
);
/*
 * Try to install a symref as an actual symlink at the locked ref's
 * path. Compiled out (no-op) on platforms defining NO_SYMLINK_HEAD;
 * on symlink() failure we fall back to a textual symbolic ref.
 */
2217 static int create_ref_symlink(struct ref_lock
*lock
, const char *target
)
2220 #ifndef NO_SYMLINK_HEAD
2221 char *ref_path
= get_locked_file_path(lock
->lk
)
;
2223 ret
= symlink(target
, ref_path
);
2227 fprintf(stderr
, "no symlink - falling back to symbolic ref\n");
/*
 * Append a reflog entry for `refname` recording the symref update:
 * old value from the held lock, new value read from `target`.
 * Failures are reported but not fatal (best-effort logging).
 */
2232 static void update_symref_reflog(struct files_ref_store
*refs
,
2233 struct ref_lock
*lock
, const char *refname
,
2234 const char *target
, const char *logmsg
)
2236 struct strbuf err
= STRBUF_INIT
;
2237 struct object_id new_oid
;
/* Resolve what the symref now points at to get the "new" oid. */
2239 !refs_read_ref_full(&refs
->base
, target
,
2240 RESOLVE_REF_READING
, new_oid
.hash
, NULL
) &&
2241 files_log_ref_write(refs
, refname
, &lock
->old_oid
,
2242 &new_oid
, logmsg
, 0, &err
)) {
2243 error("%s", err
.buf
);
2244 strbuf_release(&err
);
2248 static int create_symref_locked(struct files_ref_store
*refs
,
2249 struct ref_lock
*lock
, const char *refname
,
2250 const char *target
, const char *logmsg
)
2252 if (prefer_symlink_refs
&& !create_ref_symlink(lock
, target
)) {
2253 update_symref_reflog(refs
, lock
, refname
, target
, logmsg
);
2257 if (!fdopen_lock_file(lock
->lk
, "w"))
2258 return error("unable to fdopen %s: %s",
2259 lock
->lk
->tempfile
.filename
.buf
, strerror(errno
));
2261 update_symref_reflog(refs
, lock
, refname
, target
, logmsg
);
2263 /* no error check; commit_ref will check ferror */
2264 fprintf(lock
->lk
->tempfile
.fp
, "ref: %s\n", target
);
2265 if (commit_ref(lock
) < 0)
2266 return error("unable to write symref for %s: %s", refname
,
2271 static int files_create_symref(struct ref_store
*ref_store
,
2272 const char *refname
, const char *target
,
2275 struct files_ref_store
*refs
=
2276 files_downcast(ref_store
, REF_STORE_WRITE
, "create_symref");
2277 struct strbuf err
= STRBUF_INIT
;
2278 struct ref_lock
*lock
;
2281 lock
= lock_ref_sha1_basic(refs
, refname
, NULL
,
2282 NULL
, NULL
, REF_NODEREF
, NULL
,
2285 error("%s", err
.buf
);
2286 strbuf_release(&err
);
2290 ret
= create_symref_locked(refs
, lock
, refname
, target
, logmsg
);
/*
 * ref_store method: return true if a reflog exists for `refname`,
 * i.e. there is a regular file at its reflog path.
 */
2295 static int files_reflog_exists(struct ref_store
*ref_store
,
2296 const char *refname
)
2298 struct files_ref_store
*refs
=
2299 files_downcast(ref_store
, REF_STORE_READ
, "reflog_exists");
2300 struct strbuf sb
= STRBUF_INIT
;
2304 files_reflog_path(refs
, &sb
, refname
);
/* lstat + S_ISREG: symlinks and directories do not count as reflogs. */
2305 ret
= !lstat(sb
.buf
, &st
) && S_ISREG(st
.st_mode
);
2306 strbuf_release(&sb
);
/*
 * ref_store method: delete the reflog file for `refname`, removing
 * any parent directories left empty (remove_path() does both).
 */
2310 static int files_delete_reflog(struct ref_store
*ref_store
,
2311 const char *refname
)
2313 struct files_ref_store
*refs
=
2314 files_downcast(ref_store
, REF_STORE_WRITE
, "delete_reflog");
2315 struct strbuf sb
= STRBUF_INIT
;
2318 files_reflog_path(refs
, &sb
, refname
);
2319 ret
= remove_path(sb
.buf
);
2320 strbuf_release(&sb
);
2324 static int show_one_reflog_ent(struct strbuf
*sb
, each_reflog_ent_fn fn
, void *cb_data
)
2326 struct object_id ooid
, noid
;
2327 char *email_end
, *message
;
2328 timestamp_t timestamp
;
2330 const char *p
= sb
->buf
;
2332 /* old SP new SP name <email> SP time TAB msg LF */
2333 if (!sb
->len
|| sb
->buf
[sb
->len
- 1] != '\n' ||
2334 parse_oid_hex(p
, &ooid
, &p
) || *p
++ != ' ' ||
2335 parse_oid_hex(p
, &noid
, &p
) || *p
++ != ' ' ||
2336 !(email_end
= strchr(p
, '>')) ||
2337 email_end
[1] != ' ' ||
2338 !(timestamp
= parse_timestamp(email_end
+ 2, &message
, 10)) ||
2339 !message
|| message
[0] != ' ' ||
2340 (message
[1] != '+' && message
[1] != '-') ||
2341 !isdigit(message
[2]) || !isdigit(message
[3]) ||
2342 !isdigit(message
[4]) || !isdigit(message
[5]))
2343 return 0; /* corrupt? */
2344 email_end
[1] = '\0';
2345 tz
= strtol(message
+ 1, NULL
, 10);
2346 if (message
[6] != '\t')
2350 return fn(&ooid
, &noid
, p
, timestamp
, tz
, message
, cb_data
);
/*
 * Scan backwards from `scan` (exclusive) toward `bob` ("beginning of
 * buffer") looking for the newline that ends the previous line.
 */
2353 static char *find_beginning_of_line(char *bob
, char *scan
)
2355 while (bob
< scan
&& *(--scan
) != '\n')
2356 ; /* keep scanning backwards */
2358 * Return either beginning of the buffer, or LF at the end of
2359 * the previous line.
2364 static int files_for_each_reflog_ent_reverse(struct ref_store
*ref_store
,
2365 const char *refname
,
2366 each_reflog_ent_fn fn
,
2369 struct files_ref_store
*refs
=
2370 files_downcast(ref_store
, REF_STORE_READ
,
2371 "for_each_reflog_ent_reverse");
2372 struct strbuf sb
= STRBUF_INIT
;
2375 int ret
= 0, at_tail
= 1;
2377 files_reflog_path(refs
, &sb
, refname
);
2378 logfp
= fopen(sb
.buf
, "r");
2379 strbuf_release(&sb
);
2383 /* Jump to the end */
2384 if (fseek(logfp
, 0, SEEK_END
) < 0)
2385 ret
= error("cannot seek back reflog for %s: %s",
2386 refname
, strerror(errno
));
2388 while (!ret
&& 0 < pos
) {
2394 /* Fill next block from the end */
2395 cnt
= (sizeof(buf
) < pos
) ? sizeof(buf
) : pos
;
2396 if (fseek(logfp
, pos
- cnt
, SEEK_SET
)) {
2397 ret
= error("cannot seek back reflog for %s: %s",
2398 refname
, strerror(errno
));
2401 nread
= fread(buf
, cnt
, 1, logfp
);
2403 ret
= error("cannot read %d bytes from reflog for %s: %s",
2404 cnt
, refname
, strerror(errno
));
2409 scanp
= endp
= buf
+ cnt
;
2410 if (at_tail
&& scanp
[-1] == '\n')
2411 /* Looking at the final LF at the end of the file */
2415 while (buf
< scanp
) {
2417 * terminating LF of the previous line, or the beginning
2422 bp
= find_beginning_of_line(buf
, scanp
);
2426 * The newline is the end of the previous line,
2427 * so we know we have complete line starting
2428 * at (bp + 1). Prefix it onto any prior data
2429 * we collected for the line and process it.
2431 strbuf_splice(&sb
, 0, 0, bp
+ 1, endp
- (bp
+ 1));
2434 ret
= show_one_reflog_ent(&sb
, fn
, cb_data
);
2440 * We are at the start of the buffer, and the
2441 * start of the file; there is no previous
2442 * line, and we have everything for this one.
2443 * Process it, and we can end the loop.
2445 strbuf_splice(&sb
, 0, 0, buf
, endp
- buf
);
2446 ret
= show_one_reflog_ent(&sb
, fn
, cb_data
);
2453 * We are at the start of the buffer, and there
2454 * is more file to read backwards. Which means
2455 * we are in the middle of a line. Note that we
2456 * may get here even if *bp was a newline; that
2457 * just means we are at the exact end of the
2458 * previous line, rather than some spot in the
2461 * Save away what we have to be combined with
2462 * the data from the next read.
2464 strbuf_splice(&sb
, 0, 0, buf
, endp
- buf
);
2471 die("BUG: reverse reflog parser had leftover data");
2474 strbuf_release(&sb
);
2478 static int files_for_each_reflog_ent(struct ref_store
*ref_store
,
2479 const char *refname
,
2480 each_reflog_ent_fn fn
, void *cb_data
)
2482 struct files_ref_store
*refs
=
2483 files_downcast(ref_store
, REF_STORE_READ
,
2484 "for_each_reflog_ent");
2486 struct strbuf sb
= STRBUF_INIT
;
2489 files_reflog_path(refs
, &sb
, refname
);
2490 logfp
= fopen(sb
.buf
, "r");
2491 strbuf_release(&sb
);
2495 while (!ret
&& !strbuf_getwholeline(&sb
, logfp
, '\n'))
2496 ret
= show_one_reflog_ent(&sb
, fn
, cb_data
);
2498 strbuf_release(&sb
);
/*
 * Iterator over all reflogs, implemented by walking the logs/
 * directory tree with a dir_iterator. Embeds the generic
 * ref_iterator as its base (first member, so casts are valid).
 */
2502 struct files_reflog_iterator
{
2503 struct ref_iterator base
;
/* Store used to resolve each reflog's current ref value. */
2505 struct ref_store
*ref_store
;
2506 struct dir_iterator
*dir_iterator
;
/* Storage backing base.oid for the current entry. */
2507 struct object_id oid
;
2510 static int files_reflog_iterator_advance(struct ref_iterator
*ref_iterator
)
2512 struct files_reflog_iterator
*iter
=
2513 (struct files_reflog_iterator
*)ref_iterator
;
2514 struct dir_iterator
*diter
= iter
->dir_iterator
;
2517 while ((ok
= dir_iterator_advance(diter
)) == ITER_OK
) {
2520 if (!S_ISREG(diter
->st
.st_mode
))
2522 if (diter
->basename
[0] == '.')
2524 if (ends_with(diter
->basename
, ".lock"))
2527 if (refs_read_ref_full(iter
->ref_store
,
2528 diter
->relative_path
, 0,
2529 iter
->oid
.hash
, &flags
)) {
2530 error("bad ref for %s", diter
->path
.buf
);
2534 iter
->base
.refname
= diter
->relative_path
;
2535 iter
->base
.oid
= &iter
->oid
;
2536 iter
->base
.flags
= flags
;
2540 iter
->dir_iterator
= NULL
;
2541 if (ref_iterator_abort(ref_iterator
) == ITER_ERROR
)
/* Peeling is meaningless for reflog entries; calling this is a bug. */
2546 static int files_reflog_iterator_peel(struct ref_iterator
*ref_iterator
,
2547 struct object_id
*peeled
)
2549 die("BUG: ref_iterator_peel() called for reflog_iterator");
/*
 * Abort the reflog iteration: shut down the underlying directory
 * iterator (if still active) and free the iterator itself.
 */
2552 static int files_reflog_iterator_abort(struct ref_iterator
*ref_iterator
)
2554 struct files_reflog_iterator
*iter
=
2555 (struct files_reflog_iterator
*)ref_iterator
;
2558 if (iter
->dir_iterator
)
2559 ok
= dir_iterator_abort(iter
->dir_iterator
);
2561 base_ref_iterator_free(ref_iterator
);
/* Virtual method table wiring the reflog iterator into ref_iterator. */
2565 static struct ref_iterator_vtable files_reflog_iterator_vtable
= {
2566 files_reflog_iterator_advance
,
2567 files_reflog_iterator_peel
,
2568 files_reflog_iterator_abort
/*
 * ref_store method: start an iteration over all reflogs by walking
 * the store's reflog directory. Caller advances/aborts via the
 * generic ref_iterator interface.
 */
2571 static struct ref_iterator
*files_reflog_iterator_begin(struct ref_store
*ref_store
)
2573 struct files_ref_store
*refs
=
2574 files_downcast(ref_store
, REF_STORE_READ
,
2575 "reflog_iterator_begin");
2576 struct files_reflog_iterator
*iter
= xcalloc(1, sizeof(*iter
));
2577 struct ref_iterator
*ref_iterator
= &iter
->base
;
2578 struct strbuf sb
= STRBUF_INIT
;
2580 base_ref_iterator_init(ref_iterator
, &files_reflog_iterator_vtable
);
/* NULL refname yields the root of the reflog directory tree. */
2581 files_reflog_path(refs
, &sb
, NULL
);
2582 iter
->dir_iterator
= dir_iterator_begin(sb
.buf
);
2583 iter
->ref_store
= ref_store
;
2584 strbuf_release(&sb
);
2585 return ref_iterator
;
/*
 * If update is a direct update of head_ref (the reference pointed to
 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD so HEAD's
 * reflog records the change, too. Returns 0 on success or
 * TRANSACTION_NAME_CONFLICT (with err filled in) if HEAD is already
 * part of the transaction.
 */
static int split_head_update(struct ref_update *update,
			     struct ref_transaction *transaction,
			     const char *head_ref,
			     struct string_list *affected_refnames,
			     struct strbuf *err)
{
	struct string_list_item *item;
	struct ref_update *new_update;

	/* Updates that are themselves derived never need another split: */
	if ((update->flags & REF_LOG_ONLY) ||
	    (update->flags & REF_ISPRUNING) ||
	    (update->flags & REF_UPDATE_VIA_HEAD))
		return 0;

	if (strcmp(update->refname, head_ref))
		return 0;

	/*
	 * First make sure that HEAD is not already in the
	 * transaction. This insertion is O(N) in the transaction
	 * size, but it happens at most once per transaction.
	 */
	item = string_list_insert(affected_refnames, "HEAD");
	if (item->util) {
		/* An entry already existed */
		strbuf_addf(err,
			    "multiple updates for 'HEAD' (including one "
			    "via its referent '%s') are not allowed",
			    update->refname);
		return TRANSACTION_NAME_CONFLICT;
	}

	new_update = ref_transaction_add_update(
			transaction, "HEAD",
			update->flags | REF_LOG_ONLY | REF_NODEREF,
			update->new_oid.hash, update->old_oid.hash,
			update->msg);

	item->util = new_update;

	return 0;
}
/*
 * update is for a symref that points at referent and doesn't have
 * REF_NODEREF set. Split it into two updates:
 * - The original update, but with REF_LOG_ONLY and REF_NODEREF set
 * - A new, separate update for the referent reference
 * Note that the new update will itself be subject to splitting when
 * the iteration gets to it. Returns 0 on success or
 * TRANSACTION_NAME_CONFLICT (with err set) if referent is already in
 * the transaction.
 */
static int split_symref_update(struct files_ref_store *refs,
			       struct ref_update *update,
			       const char *referent,
			       struct ref_transaction *transaction,
			       struct string_list *affected_refnames,
			       struct strbuf *err)
{
	struct string_list_item *item;
	struct ref_update *new_update;
	unsigned int new_flags;

	/*
	 * First make sure that referent is not already in the
	 * transaction. This insertion is O(N) in the transaction
	 * size, but it happens at most once per symref in a
	 * transaction.
	 */
	item = string_list_insert(affected_refnames, referent);
	if (item->util) {
		/* An entry already existed */
		strbuf_addf(err,
			    "multiple updates for '%s' (including one "
			    "via symref '%s') are not allowed",
			    referent, update->refname);
		return TRANSACTION_NAME_CONFLICT;
	}

	new_flags = update->flags;
	if (!strcmp(update->refname, "HEAD")) {
		/*
		 * Record that the new update came via HEAD, so that
		 * when we process it, split_head_update() doesn't try
		 * to add another reflog update for HEAD. Note that
		 * this bit will be propagated if the new_update
		 * itself needs to be split.
		 */
		new_flags |= REF_UPDATE_VIA_HEAD;
	}

	new_update = ref_transaction_add_update(
			transaction, referent, new_flags,
			update->new_oid.hash, update->old_oid.hash,
			update->msg);

	new_update->parent_update = update;

	/*
	 * Change the symbolic ref update to log only. Also, it
	 * doesn't need to check its old SHA-1 value, as that will be
	 * done when new_update is processed.
	 */
	update->flags |= REF_LOG_ONLY | REF_NODEREF;
	update->flags &= ~REF_HAVE_OLD;

	item->util = new_update;

	return 0;
}
2703 * Return the refname under which update was originally requested.
2705 static const char *original_update_refname(struct ref_update
*update
)
2707 while (update
->parent_update
)
2708 update
= update
->parent_update
;
2710 return update
->refname
;
/*
 * Check whether the REF_HAVE_OLD and old_oid values stored in update
 * are consistent with oid, which is the reference's current value. If
 * everything is OK, return 0; otherwise, write an error message to
 * err and return -1.
 */
static int check_old_oid(struct ref_update *update, struct object_id *oid,
			 struct strbuf *err)
{
	/* No expectation recorded, or the value matches: OK. */
	if (!(update->flags & REF_HAVE_OLD) ||
	    !oidcmp(oid, &update->old_oid))
		return 0;

	if (is_null_oid(&update->old_oid))
		/* Caller required the ref not to exist, but it does: */
		strbuf_addf(err, "cannot lock ref '%s': "
			    "reference already exists",
			    original_update_refname(update));
	else if (is_null_oid(oid))
		/* Caller expected a value, but the ref is missing: */
		strbuf_addf(err, "cannot lock ref '%s': "
			    "reference is missing but expected %s",
			    original_update_refname(update),
			    oid_to_hex(&update->old_oid));
	else
		/* The ref exists but holds a different value: */
		strbuf_addf(err, "cannot lock ref '%s': "
			    "is at %s but expected %s",
			    original_update_refname(update),
			    oid_to_hex(oid),
			    oid_to_hex(&update->old_oid));

	return -1;
}
2746 * Prepare for carrying out update:
2747 * - Lock the reference referred to by update.
2748 * - Read the reference under lock.
2749 * - Check that its old SHA-1 value (if specified) is correct, and in
2750 * any case record it in update->lock->old_oid for later use when
2751 * writing the reflog.
2752 * - If it is a symref update without REF_NODEREF, split it up into a
2753 * REF_LOG_ONLY update of the symref and add a separate update for
2754 * the referent to transaction.
2755 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2758 static int lock_ref_for_update(struct files_ref_store
*refs
,
2759 struct ref_update
*update
,
2760 struct ref_transaction
*transaction
,
2761 const char *head_ref
,
2762 struct string_list
*affected_refnames
,
2765 struct strbuf referent
= STRBUF_INIT
;
2766 int mustexist
= (update
->flags
& REF_HAVE_OLD
) &&
2767 !is_null_oid(&update
->old_oid
);
2769 struct ref_lock
*lock
;
2771 files_assert_main_repository(refs
, "lock_ref_for_update");
2773 if ((update
->flags
& REF_HAVE_NEW
) && is_null_oid(&update
->new_oid
))
2774 update
->flags
|= REF_DELETING
;
2777 ret
= split_head_update(update
, transaction
, head_ref
,
2778 affected_refnames
, err
);
2783 ret
= lock_raw_ref(refs
, update
->refname
, mustexist
,
2784 affected_refnames
, NULL
,
2786 &update
->type
, err
);
2790 reason
= strbuf_detach(err
, NULL
);
2791 strbuf_addf(err
, "cannot lock ref '%s': %s",
2792 original_update_refname(update
), reason
);
2797 update
->backend_data
= lock
;
2799 if (update
->type
& REF_ISSYMREF
) {
2800 if (update
->flags
& REF_NODEREF
) {
2802 * We won't be reading the referent as part of
2803 * the transaction, so we have to read it here
2804 * to record and possibly check old_sha1:
2806 if (refs_read_ref_full(&refs
->base
,
2808 lock
->old_oid
.hash
, NULL
)) {
2809 if (update
->flags
& REF_HAVE_OLD
) {
2810 strbuf_addf(err
, "cannot lock ref '%s': "
2811 "error reading reference",
2812 original_update_refname(update
));
2815 } else if (check_old_oid(update
, &lock
->old_oid
, err
)) {
2816 return TRANSACTION_GENERIC_ERROR
;
2820 * Create a new update for the reference this
2821 * symref is pointing at. Also, we will record
2822 * and verify old_sha1 for this update as part
2823 * of processing the split-off update, so we
2824 * don't have to do it here.
2826 ret
= split_symref_update(refs
, update
,
2827 referent
.buf
, transaction
,
2828 affected_refnames
, err
);
2833 struct ref_update
*parent_update
;
2835 if (check_old_oid(update
, &lock
->old_oid
, err
))
2836 return TRANSACTION_GENERIC_ERROR
;
2839 * If this update is happening indirectly because of a
2840 * symref update, record the old SHA-1 in the parent
2843 for (parent_update
= update
->parent_update
;
2845 parent_update
= parent_update
->parent_update
) {
2846 struct ref_lock
*parent_lock
= parent_update
->backend_data
;
2847 oidcpy(&parent_lock
->old_oid
, &lock
->old_oid
);
2851 if ((update
->flags
& REF_HAVE_NEW
) &&
2852 !(update
->flags
& REF_DELETING
) &&
2853 !(update
->flags
& REF_LOG_ONLY
)) {
2854 if (!(update
->type
& REF_ISSYMREF
) &&
2855 !oidcmp(&lock
->old_oid
, &update
->new_oid
)) {
2857 * The reference already has the desired
2858 * value, so we don't need to write it.
2860 } else if (write_ref_to_lockfile(lock
, &update
->new_oid
,
2862 char *write_err
= strbuf_detach(err
, NULL
);
2865 * The lock was freed upon failure of
2866 * write_ref_to_lockfile():
2868 update
->backend_data
= NULL
;
2870 "cannot update ref '%s': %s",
2871 update
->refname
, write_err
);
2873 return TRANSACTION_GENERIC_ERROR
;
2875 update
->flags
|= REF_NEEDS_COMMIT
;
2878 if (!(update
->flags
& REF_NEEDS_COMMIT
)) {
2880 * We didn't call write_ref_to_lockfile(), so
2881 * the lockfile is still open. Close it to
2882 * free up the file descriptor:
2884 if (close_ref(lock
)) {
2885 strbuf_addf(err
, "couldn't close '%s.lock'",
2887 return TRANSACTION_GENERIC_ERROR
;
/*
 * Unlock any references in `transaction` that are still locked, and
 * mark the transaction closed.
 */
static void files_transaction_cleanup(struct ref_transaction *transaction)
{
	size_t i;

	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct ref_lock *lock = update->backend_data;

		if (lock) {
			unlock_ref(lock);
			update->backend_data = NULL;
		}
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}
/*
 * Prepare a transaction: verify there are no duplicate refnames, split
 * off derived HEAD/symref updates, and take all locks (writing the new
 * values into the lockfiles). On success the transaction moves to the
 * PREPARED state; on failure locks are released and the transaction is
 * closed. Returns 0 or a TRANSACTION_* error code with err set.
 */
static int files_transaction_prepare(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE,
			       "ref_transaction_prepare");
	size_t i;
	int ret = 0;
	struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
	char *head_ref = NULL;
	int head_type;
	struct object_id head_oid;

	/* An empty transaction is trivially prepared: */
	if (!transaction->nr)
		goto cleanup;

	/*
	 * Fail if a refname appears more than once in the
	 * transaction. (If we end up splitting up any updates using
	 * split_symref_update() or split_head_update(), those
	 * functions will check that the new updates don't have the
	 * same refname as any existing ones.)
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&affected_refnames, update->refname);

		/*
		 * We store a pointer to update in item->util, but at
		 * the moment we never use the value of this field
		 * except to check whether it is non-NULL.
		 */
		item->util = update;
	}
	string_list_sort(&affected_refnames);
	if (ref_update_reject_duplicates(&affected_refnames, err)) {
		ret = TRANSACTION_GENERIC_ERROR;
		goto cleanup;
	}

	/*
	 * Special hack: If a branch is updated directly and HEAD
	 * points to it (may happen on the remote side of a push
	 * for example) then logically the HEAD reflog should be
	 * updated too.
	 *
	 * A generic solution would require reverse symref lookups,
	 * but finding all symrefs pointing to a given branch would be
	 * rather costly for this rare event (the direct update of a
	 * branch) to be worth it. So let's cheat and check with HEAD
	 * only, which should cover 99% of all usage scenarios (even
	 * 100% of the default ones).
	 *
	 * So if HEAD is a symbolic reference, then record the name of
	 * the reference that it points to. If we see an update of
	 * head_ref within the transaction, then split_head_update()
	 * arranges for the reflog of HEAD to be updated, too.
	 */
	head_ref = refs_resolve_refdup(ref_store, "HEAD",
				       RESOLVE_REF_NO_RECURSE,
				       head_oid.hash, &head_type);

	if (head_ref && !(head_type & REF_ISSYMREF)) {
		/* HEAD is detached; no extra reflog update needed. */
		free(head_ref);
		head_ref = NULL;
	}

	/*
	 * Acquire all locks, verify old values if provided, check
	 * that new values are valid, and write new values to the
	 * lockfiles, ready to be activated. Only keep one lockfile
	 * open at a time to avoid running out of file descriptors.
	 * Note that lock_ref_for_update() might append more updates
	 * to the transaction.
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		ret = lock_ref_for_update(refs, update, transaction,
					  head_ref, &affected_refnames, err);
		if (ret)
			break;
	}

cleanup:
	free(head_ref);
	string_list_clear(&affected_refnames, 0);

	if (ret)
		files_transaction_cleanup(transaction);
	else
		transaction->state = REF_TRANSACTION_PREPARED;

	return ret;
}
/*
 * Finish a prepared transaction: commit the locked updates (writing
 * reflogs first so live commits remain referenced), then perform
 * deletions, repack packed-refs without the deleted refs, remove their
 * reflogs, and prune now-empty parent directories. Returns 0 or a
 * TRANSACTION_* error code with err set.
 */
static int files_transaction_finish(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, 0, "ref_transaction_finish");
	size_t i;
	int ret = 0;
	struct string_list refs_to_delete = STRING_LIST_INIT_NODUP;
	struct string_list_item *ref_to_delete;
	struct strbuf sb = STRBUF_INIT;

	if (!transaction->nr) {
		transaction->state = REF_TRANSACTION_CLOSED;
		return 0;
	}

	/* Perform updates first so live commits remain referenced */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct ref_lock *lock = update->backend_data;

		if (update->flags & REF_NEEDS_COMMIT ||
		    update->flags & REF_LOG_ONLY) {
			if (files_log_ref_write(refs,
						lock->ref_name,
						&lock->old_oid,
						&update->new_oid,
						update->msg, update->flags,
						err)) {
				char *old_msg = strbuf_detach(err, NULL);

				strbuf_addf(err, "cannot update the ref '%s': %s",
					    lock->ref_name, old_msg);
				free(old_msg);
				unlock_ref(lock);
				update->backend_data = NULL;
				ret = TRANSACTION_GENERIC_ERROR;
				goto cleanup;
			}
		}
		if (update->flags & REF_NEEDS_COMMIT) {
			clear_loose_ref_cache(refs);
			if (commit_ref(lock)) {
				strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
				unlock_ref(lock);
				update->backend_data = NULL;
				ret = TRANSACTION_GENERIC_ERROR;
				goto cleanup;
			}
		}
	}
	/* Perform deletes now that updates are safely completed */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct ref_lock *lock = update->backend_data;

		if (update->flags & REF_DELETING &&
		    !(update->flags & REF_LOG_ONLY)) {
			if (!(update->type & REF_ISPACKED) ||
			    update->type & REF_ISSYMREF) {
				/* It is a loose reference. */
				strbuf_reset(&sb);
				files_ref_path(refs, &sb, lock->ref_name);
				if (unlink_or_msg(sb.buf, err)) {
					ret = TRANSACTION_GENERIC_ERROR;
					goto cleanup;
				}
				update->flags |= REF_DELETED_LOOSE;
			}

			/* Pruned refs are removed from packed-refs elsewhere: */
			if (!(update->flags & REF_ISPRUNING))
				string_list_append(&refs_to_delete,
						   lock->ref_name);
		}
	}

	if (repack_without_refs(refs, &refs_to_delete, err)) {
		ret = TRANSACTION_GENERIC_ERROR;
		goto cleanup;
	}

	/* Delete the reflogs of any references that were deleted: */
	for_each_string_list_item(ref_to_delete, &refs_to_delete) {
		strbuf_reset(&sb);
		files_reflog_path(refs, &sb, ref_to_delete->string);
		if (!unlink_or_warn(sb.buf))
			try_remove_empty_parents(refs, ref_to_delete->string,
						 REMOVE_EMPTY_PARENTS_REFLOG);
	}

	clear_loose_ref_cache(refs);

cleanup:
	files_transaction_cleanup(transaction);

	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if (update->flags & REF_DELETED_LOOSE) {
			/*
			 * The loose reference was deleted. Delete any
			 * empty parent directories. (Note that this
			 * can only work because we have already
			 * removed the lockfile.)
			 */
			try_remove_empty_parents(refs, update->refname,
						 REMOVE_EMPTY_PARENTS_REF);
		}
	}

	strbuf_release(&sb);
	string_list_clear(&refs_to_delete, 0);
	return ret;
}
/*
 * Abort a transaction: release any locks still held and mark it
 * closed. Always succeeds; ref_store and err are unused.
 */
static int files_transaction_abort(struct ref_store *ref_store,
				   struct ref_transaction *transaction,
				   struct strbuf *err)
{
	files_transaction_cleanup(transaction);
	return 0;
}
/*
 * An each_ref_fn callback: report whether refname appears in the
 * string_list passed via cb_data. oid and flags are ignored.
 */
static int ref_present(const char *refname,
		       const struct object_id *oid, int flags, void *cb_data)
{
	struct string_list *wanted = cb_data;

	return string_list_has_string(wanted, refname);
}
/*
 * Commit a transaction directly to the packed-refs file, for use while
 * initializing a repository (e.g. by clone). Assumes no concurrent
 * ref activity and no pre-existing loose refs that would shadow the
 * values being written. Returns 0 or a TRANSACTION_* error code.
 */
static int files_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE,
			       "initial_ref_transaction_commit");
	size_t i;
	int ret = 0;
	struct string_list affected_refnames = STRING_LIST_INIT_NODUP;

	if (transaction->state != REF_TRANSACTION_OPEN)
		die("BUG: commit called for transaction that is not open");

	/* Fail if a refname appears more than once in the transaction: */
	for (i = 0; i < transaction->nr; i++)
		string_list_append(&affected_refnames,
				   transaction->updates[i]->refname);
	string_list_sort(&affected_refnames);
	if (ref_update_reject_duplicates(&affected_refnames, err)) {
		ret = TRANSACTION_GENERIC_ERROR;
		goto cleanup;
	}

	/*
	 * It's really undefined to call this function in an active
	 * repository or when there are existing references: we are
	 * only locking and changing packed-refs, so (1) any
	 * simultaneous processes might try to change a reference at
	 * the same time we do, and (2) any existing loose versions of
	 * the references that we are setting would have precedence
	 * over our values. But some remote helpers create the remote
	 * "HEAD" and "master" branches before calling this function,
	 * so here we really only check that none of the references
	 * that we are creating already exists.
	 */
	if (refs_for_each_rawref(&refs->base, ref_present,
				 &affected_refnames))
		die("BUG: initial ref transaction called with existing refs");

	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if ((update->flags & REF_HAVE_OLD) &&
		    !is_null_oid(&update->old_oid))
			die("BUG: initial ref transaction with old_sha1 set");
		if (refs_verify_refname_available(&refs->base, update->refname,
						  &affected_refnames, NULL,
						  err)) {
			ret = TRANSACTION_NAME_CONFLICT;
			goto cleanup;
		}
	}

	if (lock_packed_refs(refs, 0)) {
		strbuf_addf(err, "unable to lock packed-refs file: %s",
			    strerror(errno));
		ret = TRANSACTION_GENERIC_ERROR;
		goto cleanup;
	}

	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		/* Deletions are no-ops here (nothing exists yet): */
		if ((update->flags & REF_HAVE_NEW) &&
		    !is_null_oid(&update->new_oid))
			add_packed_ref(refs, update->refname,
				       &update->new_oid);
	}

	if (commit_packed_refs(refs)) {
		strbuf_addf(err, "unable to commit packed-refs file: %s",
			    strerror(errno));
		ret = TRANSACTION_GENERIC_ERROR;
		goto cleanup;
	}

cleanup:
	transaction->state = REF_TRANSACTION_CLOSED;
	string_list_clear(&affected_refnames, 0);
	return ret;
}
/* State threaded through expire_reflog_ent() callback invocations. */
struct expire_reflog_cb {
	unsigned int flags;	/* EXPIRE_REFLOGS_* option bits */
	reflog_expiry_should_prune_fn *should_prune_fn;
	void *policy_cb;	/* opaque state for the policy callbacks */
	FILE *newlog;		/* rewritten reflog; NULL on dry runs */
	/* New value of the most recent entry that was kept: */
	struct object_id last_kept_oid;
};
/*
 * A reflog-ent callback: decide via should_prune_fn whether to keep
 * this entry. Kept entries are written to cb->newlog (when set), with
 * the old value rewritten to last_kept_oid under EXPIRE_REFLOGS_REWRITE
 * so the log stays connected. Verbose/dry-run modes print what is
 * (or would be) done. Always returns 0 to continue iteration.
 */
static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
			     const char *email, timestamp_t timestamp, int tz,
			     const char *message, void *cb_data)
{
	struct expire_reflog_cb *cb = cb_data;
	struct expire_reflog_policy_cb *policy_cb = cb->policy_cb;

	if (cb->flags & EXPIRE_REFLOGS_REWRITE)
		/* Stitch this entry onto the last one we kept: */
		ooid = &cb->last_kept_oid;

	if ((*cb->should_prune_fn)(ooid, noid, email, timestamp, tz,
				   message, policy_cb)) {
		if (!cb->newlog)
			printf("would prune %s", message);
		else if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
			printf("prune %s", message);
	} else {
		if (cb->newlog) {
			fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s",
				oid_to_hex(ooid), oid_to_hex(noid),
				email, timestamp, tz, message);
			oidcpy(&cb->last_kept_oid, noid);
		}
		if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
			printf("keep %s", message);
	}
	return 0;
}
/*
 * Expire entries from the reflog of refname per the supplied policy
 * callbacks. The ref itself is locked for the duration; unless this is
 * a dry run, the surviving entries are written to a lockfile that then
 * replaces the reflog, and with EXPIRE_REFLOGS_UPDATE_REF the ref is
 * rewound to the last kept entry. Returns 0 on success, nonzero on
 * failure.
 */
static int files_reflog_expire(struct ref_store *ref_store,
			       const char *refname, const unsigned char *sha1,
			       unsigned int flags,
			       reflog_expiry_prepare_fn prepare_fn,
			       reflog_expiry_should_prune_fn should_prune_fn,
			       reflog_expiry_cleanup_fn cleanup_fn,
			       void *policy_cb_data)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
	static struct lock_file reflog_lock;
	struct expire_reflog_cb cb;
	struct ref_lock *lock;
	struct strbuf log_file_sb = STRBUF_INIT;
	char *log_file;
	int status = 0;
	int type;
	struct strbuf err = STRBUF_INIT;
	struct object_id oid;

	memset(&cb, 0, sizeof(cb));
	cb.flags = flags;
	cb.policy_cb = policy_cb_data;
	cb.should_prune_fn = should_prune_fn;

	/*
	 * The reflog file is locked by holding the lock on the
	 * reference itself, plus we might need to update the
	 * reference if --updateref was specified:
	 */
	lock = lock_ref_sha1_basic(refs, refname, sha1,
				   NULL, NULL, REF_NODEREF,
				   &type, &err);
	if (!lock) {
		error("cannot lock ref '%s': %s", refname, err.buf);
		strbuf_release(&err);
		return -1;
	}
	if (!refs_reflog_exists(ref_store, refname)) {
		/* Nothing to expire. */
		unlock_ref(lock);
		return 0;
	}

	files_reflog_path(refs, &log_file_sb, refname);
	log_file = strbuf_detach(&log_file_sb, NULL);
	if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
		/*
		 * Even though holding $GIT_DIR/logs/$reflog.lock has
		 * no locking implications, we use the lock_file
		 * machinery here anyway because it does a lot of the
		 * work we need, including cleaning up if the program
		 * exits unexpectedly.
		 */
		if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) {
			struct strbuf err = STRBUF_INIT;
			unable_to_lock_message(log_file, errno, &err);
			error("%s", err.buf);
			strbuf_release(&err);
			goto failure;
		}
		cb.newlog = fdopen_lock_file(&reflog_lock, "w");
		if (!cb.newlog) {
			error("cannot fdopen %s (%s)",
			      get_lock_file_path(&reflog_lock), strerror(errno));
			goto failure;
		}
	}

	hashcpy(oid.hash, sha1);

	/* Walk the existing log, letting the policy decide what to keep: */
	(*prepare_fn)(refname, &oid, cb.policy_cb);
	refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
	(*cleanup_fn)(cb.policy_cb);

	if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
		/*
		 * It doesn't make sense to adjust a reference pointed
		 * to by a symbolic ref based on expiring entries in
		 * the symbolic reference's reflog. Nor can we update
		 * a reference if there are no remaining reflog
		 * entries.
		 */
		int update = (flags & EXPIRE_REFLOGS_UPDATE_REF) &&
			!(type & REF_ISSYMREF) &&
			!is_null_oid(&cb.last_kept_oid);

		if (close_lock_file(&reflog_lock)) {
			status |= error("couldn't write %s: %s", log_file,
					strerror(errno));
		} else if (update &&
			   (write_in_full(get_lock_file_fd(lock->lk),
				oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
			    write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 ||
			    close_ref(lock) < 0)) {
			status |= error("couldn't write %s",
					get_lock_file_path(lock->lk));
			rollback_lock_file(&reflog_lock);
		} else if (commit_lock_file(&reflog_lock)) {
			status |= error("unable to write reflog '%s' (%s)",
					log_file, strerror(errno));
		} else if (update && commit_ref(lock)) {
			status |= error("couldn't set %s", lock->ref_name);
		}
	}
	free(log_file);
	unlock_ref(lock);
	return status;

 failure:
	rollback_lock_file(&reflog_lock);
	free(log_file);
	unlock_ref(lock);
	return -1;
}
/*
 * Initialize the on-disk ref database: create the refs/heads and
 * refs/tags directories (ignoring failures if they already exist, per
 * safe_create_dir()'s second argument). Returns 0; err is unused.
 */
static int files_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "init_db");
	struct strbuf sb = STRBUF_INIT;

	/*
	 * Create .git/refs/{heads,tags}
	 */
	files_ref_path(refs, &sb, "refs/heads");
	safe_create_dir(sb.buf, 1);

	strbuf_reset(&sb);
	files_ref_path(refs, &sb, "refs/tags");
	safe_create_dir(sb.buf, 1);

	strbuf_release(&sb);
	return 0;
}
3405 struct ref_storage_be refs_be_files
= {
3408 files_ref_store_create
,
3410 files_transaction_prepare
,
3411 files_transaction_finish
,
3412 files_transaction_abort
,
3413 files_initial_transaction_commit
,
3417 files_create_symref
,
3421 files_ref_iterator_begin
,
3424 files_reflog_iterator_begin
,
3425 files_for_each_reflog_ent
,
3426 files_for_each_reflog_ent_reverse
,
3427 files_reflog_exists
,
3428 files_create_reflog
,
3429 files_delete_reflog
,