thirdparty/git.git / refs/files-backend.c
files-backend: cheapen refname_available check when locking refs
1 #include "../cache.h"
2 #include "../refs.h"
3 #include "refs-internal.h"
4 #include "ref-cache.h"
5 #include "packed-backend.h"
6 #include "../iterator.h"
7 #include "../dir-iterator.h"
8 #include "../lockfile.h"
9 #include "../object.h"
10 #include "../dir.h"
11
12 struct ref_lock {
13 char *ref_name;
14 struct lock_file *lk;
15 struct object_id old_oid;
16 };
17
18 /*
19 * Future: need to be in "struct repository"
20 * when doing a full libification.
21 */
22 struct files_ref_store {
23 struct ref_store base;
24 unsigned int store_flags;
25
26 char *gitdir;
27 char *gitcommondir;
28
29 struct ref_cache *loose;
30
31 struct ref_store *packed_ref_store;
32 };
33
34 static void clear_loose_ref_cache(struct files_ref_store *refs)
35 {
36 if (refs->loose) {
37 free_ref_cache(refs->loose);
38 refs->loose = NULL;
39 }
40 }
41
42 /*
43 * Create a new files_ref_store for the given gitdir and return
44 * it, upcast to a generic ref_store.
45 */
46 static struct ref_store *files_ref_store_create(const char *gitdir,
47 unsigned int flags)
48 {
49 struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
50 struct ref_store *ref_store = (struct ref_store *)refs;
51 struct strbuf sb = STRBUF_INIT;
52
53 base_ref_store_init(ref_store, &refs_be_files);
54 refs->store_flags = flags;
55
56 refs->gitdir = xstrdup(gitdir);
57 get_common_dir_noenv(&sb, gitdir);
58 refs->gitcommondir = strbuf_detach(&sb, NULL);
59 strbuf_addf(&sb, "%s/packed-refs", refs->gitcommondir);
60 refs->packed_ref_store = packed_ref_store_create(sb.buf, flags);
61 strbuf_release(&sb);
62
63 return ref_store;
64 }
65
66 /*
67 * Die if refs is not the main ref store. caller is used in any
68 * necessary error messages.
69 */
70 static void files_assert_main_repository(struct files_ref_store *refs,
71 const char *caller)
72 {
73 if (refs->store_flags & REF_STORE_MAIN)
74 return;
75
76 die("BUG: operation %s only allowed for main ref store", caller);
77 }
78
79 /*
80 * Downcast ref_store to files_ref_store. Die if ref_store is not a
81 * files_ref_store. required_flags is compared with ref_store's
82 * store_flags to ensure the ref_store has all required capabilities.
83 * "caller" is used in any necessary error messages.
84 */
85 static struct files_ref_store *files_downcast(struct ref_store *ref_store,
86 unsigned int required_flags,
87 const char *caller)
88 {
89 struct files_ref_store *refs;
90
91 if (ref_store->be != &refs_be_files)
92 die("BUG: ref_store is type \"%s\" not \"files\" in %s",
93 ref_store->be->name, caller);
94
95 refs = (struct files_ref_store *)ref_store;
96
97 if ((refs->store_flags & required_flags) != required_flags)
98 die("BUG: operation %s requires abilities 0x%x, but only have 0x%x",
99 caller, required_flags, refs->store_flags);
100
101 return refs;
102 }
103
104 static void files_reflog_path(struct files_ref_store *refs,
105 struct strbuf *sb,
106 const char *refname)
107 {
108 if (!refname) {
109 /*
110 * FIXME: of course this is wrong in multi worktree
111 * setting. To be fixed real soon.
112 */
113 strbuf_addf(sb, "%s/logs", refs->gitcommondir);
114 return;
115 }
116
117 switch (ref_type(refname)) {
118 case REF_TYPE_PER_WORKTREE:
119 case REF_TYPE_PSEUDOREF:
120 strbuf_addf(sb, "%s/logs/%s", refs->gitdir, refname);
121 break;
122 case REF_TYPE_NORMAL:
123 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname);
124 break;
125 default:
126 die("BUG: unknown ref type %d of ref %s",
127 ref_type(refname), refname);
128 }
129 }
130
131 static void files_ref_path(struct files_ref_store *refs,
132 struct strbuf *sb,
133 const char *refname)
134 {
135 switch (ref_type(refname)) {
136 case REF_TYPE_PER_WORKTREE:
137 case REF_TYPE_PSEUDOREF:
138 strbuf_addf(sb, "%s/%s", refs->gitdir, refname);
139 break;
140 case REF_TYPE_NORMAL:
141 strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname);
142 break;
143 default:
144 die("BUG: unknown ref type %d of ref %s",
145 ref_type(refname), refname);
146 }
147 }
148
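/*
 * A rough illustration (assumed paths, not from the original source): with a
 * linked worktree whose gitdir is ".git/worktrees/wt" and whose common dir is
 * ".git", the two helpers above resolve refnames like so:
 *
 *   files_ref_path(refs, &sb, "refs/heads/master")
 *       -> ".git/refs/heads/master"                  (REF_TYPE_NORMAL)
 *   files_ref_path(refs, &sb, "HEAD")
 *       -> ".git/worktrees/wt/HEAD"                  (REF_TYPE_PER_WORKTREE)
 *   files_reflog_path(refs, &sb, "refs/heads/master")
 *       -> ".git/logs/refs/heads/master"
 *   files_reflog_path(refs, &sb, "refs/bisect/bad")
 *       -> ".git/worktrees/wt/logs/refs/bisect/bad"  (per-worktree)
 */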
149 /*
150 * Read the loose references from the namespace dirname into dir
151 * (without recursing). dirname must end with '/'. dir must be the
152 * directory entry corresponding to dirname.
153 */
154 static void loose_fill_ref_dir(struct ref_store *ref_store,
155 struct ref_dir *dir, const char *dirname)
156 {
157 struct files_ref_store *refs =
158 files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir");
159 DIR *d;
160 struct dirent *de;
161 int dirnamelen = strlen(dirname);
162 struct strbuf refname;
163 struct strbuf path = STRBUF_INIT;
164 size_t path_baselen;
165
166 files_ref_path(refs, &path, dirname);
167 path_baselen = path.len;
168
169 d = opendir(path.buf);
170 if (!d) {
171 strbuf_release(&path);
172 return;
173 }
174
175 strbuf_init(&refname, dirnamelen + 257);
176 strbuf_add(&refname, dirname, dirnamelen);
177
178 while ((de = readdir(d)) != NULL) {
179 struct object_id oid;
180 struct stat st;
181 int flag;
182
183 if (de->d_name[0] == '.')
184 continue;
185 if (ends_with(de->d_name, ".lock"))
186 continue;
187 strbuf_addstr(&refname, de->d_name);
188 strbuf_addstr(&path, de->d_name);
189 if (stat(path.buf, &st) < 0) {
190 ; /* silently ignore */
191 } else if (S_ISDIR(st.st_mode)) {
192 strbuf_addch(&refname, '/');
193 add_entry_to_dir(dir,
194 create_dir_entry(dir->cache, refname.buf,
195 refname.len, 1));
196 } else {
197 if (!refs_resolve_ref_unsafe(&refs->base,
198 refname.buf,
199 RESOLVE_REF_READING,
200 oid.hash, &flag)) {
201 oidclr(&oid);
202 flag |= REF_ISBROKEN;
203 } else if (is_null_oid(&oid)) {
204 /*
205 * It is so astronomically unlikely
206 * that NULL_SHA1 is the SHA-1 of an
207 * actual object that we consider its
208 * appearance in a loose reference
209 * file to be repo corruption
210 * (probably due to a software bug).
211 */
212 flag |= REF_ISBROKEN;
213 }
214
215 if (check_refname_format(refname.buf,
216 REFNAME_ALLOW_ONELEVEL)) {
217 if (!refname_is_safe(refname.buf))
218 die("loose refname is dangerous: %s", refname.buf);
219 oidclr(&oid);
220 flag |= REF_BAD_NAME | REF_ISBROKEN;
221 }
222 add_entry_to_dir(dir,
223 create_ref_entry(refname.buf, &oid, flag));
224 }
225 strbuf_setlen(&refname, dirnamelen);
226 strbuf_setlen(&path, path_baselen);
227 }
228 strbuf_release(&refname);
229 strbuf_release(&path);
230 closedir(d);
231
232 /*
233 * Manually add refs/bisect, which, being per-worktree, might
234 * not appear in the directory listing for refs/ in the main
235 * repo.
236 */
237 if (!strcmp(dirname, "refs/")) {
238 int pos = search_ref_dir(dir, "refs/bisect/", 12);
239
240 if (pos < 0) {
241 struct ref_entry *child_entry = create_dir_entry(
242 dir->cache, "refs/bisect/", 12, 1);
243 add_entry_to_dir(dir, child_entry);
244 }
245 }
246 }
247
248 static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
249 {
250 if (!refs->loose) {
251 /*
252 * Mark the top-level directory complete because we
253 * are about to read the only subdirectory that can
254 * hold references:
255 */
256 refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir);
257
258 /* We're going to fill the top level ourselves: */
259 refs->loose->root->flag &= ~REF_INCOMPLETE;
260
261 /*
262 * Add an incomplete entry for "refs/" (to be filled
263 * lazily):
264 */
265 add_entry_to_dir(get_ref_dir(refs->loose->root),
266 create_dir_entry(refs->loose, "refs/", 5, 1));
267 }
268 return refs->loose;
269 }
270
271 static int files_read_raw_ref(struct ref_store *ref_store,
272 const char *refname, unsigned char *sha1,
273 struct strbuf *referent, unsigned int *type)
274 {
275 struct files_ref_store *refs =
276 files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
277 struct strbuf sb_contents = STRBUF_INIT;
278 struct strbuf sb_path = STRBUF_INIT;
279 const char *path;
280 const char *buf;
281 struct stat st;
282 int fd;
283 int ret = -1;
284 int save_errno;
285 int remaining_retries = 3;
286
287 *type = 0;
288 strbuf_reset(&sb_path);
289
290 files_ref_path(refs, &sb_path, refname);
291
292 path = sb_path.buf;
293
294 stat_ref:
295 /*
296 * We might have to loop back here to avoid a race
297 * condition: first we lstat() the file, then we try
298 * to read it as a link or as a file. But if somebody
299 * changes the type of the file (file <-> directory
300 * <-> symlink) between the lstat() and reading, then
301 * we don't want to report that as an error but rather
302 * try again starting with the lstat().
303 *
304 * We'll keep a count of the retries, though, just to avoid
305 * any confusing situation sending us into an infinite loop.
306 */
307
308 if (remaining_retries-- <= 0)
309 goto out;
310
311 if (lstat(path, &st) < 0) {
312 if (errno != ENOENT)
313 goto out;
314 if (refs_read_raw_ref(refs->packed_ref_store, refname,
315 sha1, referent, type)) {
316 errno = ENOENT;
317 goto out;
318 }
319 ret = 0;
320 goto out;
321 }
322
323 /* Follow "normalized" symlinks (i.e. those pointing at "refs/...") by hand */
324 if (S_ISLNK(st.st_mode)) {
325 strbuf_reset(&sb_contents);
326 if (strbuf_readlink(&sb_contents, path, 0) < 0) {
327 if (errno == ENOENT || errno == EINVAL)
328 /* inconsistent with lstat; retry */
329 goto stat_ref;
330 else
331 goto out;
332 }
333 if (starts_with(sb_contents.buf, "refs/") &&
334 !check_refname_format(sb_contents.buf, 0)) {
335 strbuf_swap(&sb_contents, referent);
336 *type |= REF_ISSYMREF;
337 ret = 0;
338 goto out;
339 }
340 /*
341 * It doesn't look like a refname; fall through to just
342 * treating it like a non-symlink, and reading whatever it
343 * points to.
344 */
345 }
346
347 /* Is it a directory? */
348 if (S_ISDIR(st.st_mode)) {
349 /*
350 * Even though there is a directory where the loose
351 * ref is supposed to be, there could still be a
352 * packed ref:
353 */
354 if (refs_read_raw_ref(refs->packed_ref_store, refname,
355 sha1, referent, type)) {
356 errno = EISDIR;
357 goto out;
358 }
359 ret = 0;
360 goto out;
361 }
362
363 /*
364 * Anything else, just open it and try to use it as
365 * a ref
366 */
367 fd = open(path, O_RDONLY);
368 if (fd < 0) {
369 if (errno == ENOENT && !S_ISLNK(st.st_mode))
370 /* inconsistent with lstat; retry */
371 goto stat_ref;
372 else
373 goto out;
374 }
375 strbuf_reset(&sb_contents);
376 if (strbuf_read(&sb_contents, fd, 256) < 0) {
377 int save_errno = errno;
378 close(fd);
379 errno = save_errno;
380 goto out;
381 }
382 close(fd);
383 strbuf_rtrim(&sb_contents);
384 buf = sb_contents.buf;
385 if (starts_with(buf, "ref:")) {
386 buf += 4;
387 while (isspace(*buf))
388 buf++;
389
390 strbuf_reset(referent);
391 strbuf_addstr(referent, buf);
392 *type |= REF_ISSYMREF;
393 ret = 0;
394 goto out;
395 }
396
397 /*
398 * Please note that FETCH_HEAD has additional
399 * data after the sha.
400 */
401 if (get_sha1_hex(buf, sha1) ||
402 (buf[40] != '\0' && !isspace(buf[40]))) {
403 *type |= REF_ISBROKEN;
404 errno = EINVAL;
405 goto out;
406 }
407
408 ret = 0;
409
410 out:
411 save_errno = errno;
412 strbuf_release(&sb_path);
413 strbuf_release(&sb_contents);
414 errno = save_errno;
415 return ret;
416 }
417
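/*
 * For illustration (made-up values, not part of the original file): the two
 * on-disk formats the code above accepts for a loose ref file are a symref
 * line and a plain object name, e.g.
 *
 *   "ref: refs/heads/master\n"                    -> REF_ISSYMREF, referent set
 *   "89e484d8a1ab7a5486e62d4c13eb69dbe7f4ce6b\n"  -> sha1 filled in
 *
 * FETCH_HEAD may carry extra data after the object name, which is why only
 * the character right after the 40 hex digits is required to be NUL or
 * whitespace.
 */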
418 static void unlock_ref(struct ref_lock *lock)
419 {
420 /* Do not free lock->lk -- atexit() still looks at them */
421 if (lock->lk)
422 rollback_lock_file(lock->lk);
423 free(lock->ref_name);
424 free(lock);
425 }
426
427 /*
428 * Lock refname, without following symrefs, and set *lock_p to point
429 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
430 * and type similarly to read_raw_ref().
431 *
432 * The caller must verify that refname is a "safe" reference name (in
433 * the sense of refname_is_safe()) before calling this function.
434 *
435 * If the reference doesn't already exist, verify that refname doesn't
436 * have a D/F conflict with any existing references. extras and skip
437 * are passed to refs_verify_refname_available() for this check.
438 *
439 * If mustexist is not set and the reference is not found or is
440 * broken, lock the reference anyway but clear sha1.
441 *
442 * Return 0 on success. On failure, write an error message to err and
443 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
444 *
445 * Implementation note: This function is basically
446 *
447 * lock reference
448 * read_raw_ref()
449 *
450 * but it includes a lot more code to
451 * - Deal with possible races with other processes
452 * - Avoid calling refs_verify_refname_available() when it can be
453 * avoided, namely if we were successfully able to read the ref
454 * - Generate informative error messages in the case of failure
455 */
456 static int lock_raw_ref(struct files_ref_store *refs,
457 const char *refname, int mustexist,
458 const struct string_list *extras,
459 const struct string_list *skip,
460 struct ref_lock **lock_p,
461 struct strbuf *referent,
462 unsigned int *type,
463 struct strbuf *err)
464 {
465 struct ref_lock *lock;
466 struct strbuf ref_file = STRBUF_INIT;
467 int attempts_remaining = 3;
468 int ret = TRANSACTION_GENERIC_ERROR;
469
470 assert(err);
471 files_assert_main_repository(refs, "lock_raw_ref");
472
473 *type = 0;
474
475 /* First lock the file so it can't change out from under us. */
476
477 *lock_p = lock = xcalloc(1, sizeof(*lock));
478
479 lock->ref_name = xstrdup(refname);
480 files_ref_path(refs, &ref_file, refname);
481
482 retry:
483 switch (safe_create_leading_directories(ref_file.buf)) {
484 case SCLD_OK:
485 break; /* success */
486 case SCLD_EXISTS:
487 /*
488 * Suppose refname is "refs/foo/bar". We just failed
489 * to create the containing directory, "refs/foo",
490 * because there was a non-directory in the way. This
491 * indicates a D/F conflict, probably because of
492 * another reference such as "refs/foo". There is no
493 * reason to expect this error to be transitory.
494 */
495 if (refs_verify_refname_available(&refs->base, refname,
496 extras, skip, err)) {
497 if (mustexist) {
498 /*
499 * To the user the relevant error is
500 * that the "mustexist" reference is
501 * missing:
502 */
503 strbuf_reset(err);
504 strbuf_addf(err, "unable to resolve reference '%s'",
505 refname);
506 } else {
507 /*
508 * The error message set by
509 * refs_verify_refname_available() is
510 * OK.
511 */
512 ret = TRANSACTION_NAME_CONFLICT;
513 }
514 } else {
515 /*
516 * The file that is in the way isn't a loose
517 * reference. Report it as a low-level
518 * failure.
519 */
520 strbuf_addf(err, "unable to create lock file %s.lock; "
521 "non-directory in the way",
522 ref_file.buf);
523 }
524 goto error_return;
525 case SCLD_VANISHED:
526 /* Maybe another process was tidying up. Try again. */
527 if (--attempts_remaining > 0)
528 goto retry;
529 /* fall through */
530 default:
531 strbuf_addf(err, "unable to create directory for %s",
532 ref_file.buf);
533 goto error_return;
534 }
535
536 if (!lock->lk)
537 lock->lk = xcalloc(1, sizeof(struct lock_file));
538
539 if (hold_lock_file_for_update(lock->lk, ref_file.buf, LOCK_NO_DEREF) < 0) {
540 if (errno == ENOENT && --attempts_remaining > 0) {
541 /*
542 * Maybe somebody just deleted one of the
543 * directories leading to ref_file. Try
544 * again:
545 */
546 goto retry;
547 } else {
548 unable_to_lock_message(ref_file.buf, errno, err);
549 goto error_return;
550 }
551 }
552
553 /*
554 * Now we hold the lock and can read the reference without
555 * fear that its value will change.
556 */
557
558 if (files_read_raw_ref(&refs->base, refname,
559 lock->old_oid.hash, referent, type)) {
560 if (errno == ENOENT) {
561 if (mustexist) {
562 /* Garden variety missing reference. */
563 strbuf_addf(err, "unable to resolve reference '%s'",
564 refname);
565 goto error_return;
566 } else {
567 /*
568 * Reference is missing, but that's OK. We
569 * know that there is not a conflict with
570 * another loose reference because
571 * (supposing that we are trying to lock
572 * reference "refs/foo/bar"):
573 *
574 * - We were successfully able to create
575 * the lockfile refs/foo/bar.lock, so we
576 * know there cannot be a loose reference
577 * named "refs/foo".
578 *
579 * - We got ENOENT and not EISDIR, so we
580 * know that there cannot be a loose
581 * reference named "refs/foo/bar/baz".
582 */
583 }
584 } else if (errno == EISDIR) {
585 /*
586 * There is a directory in the way. It might have
587 * contained references that have been deleted. If
588 * we don't require that the reference already
589 * exists, try to remove the directory so that it
590 * doesn't cause trouble when we want to rename the
591 * lockfile into place later.
592 */
593 if (mustexist) {
594 /* Garden variety missing reference. */
595 strbuf_addf(err, "unable to resolve reference '%s'",
596 refname);
597 goto error_return;
598 } else if (remove_dir_recursively(&ref_file,
599 REMOVE_DIR_EMPTY_ONLY)) {
600 if (refs_verify_refname_available(
601 &refs->base, refname,
602 extras, skip, err)) {
603 /*
604 * The error message set by
605 * verify_refname_available() is OK.
606 */
607 ret = TRANSACTION_NAME_CONFLICT;
608 goto error_return;
609 } else {
610 /*
611 * We can't delete the directory,
612 * but we also don't know of any
613 * references that it should
614 * contain.
615 */
616 strbuf_addf(err, "there is a non-empty directory '%s' "
617 "blocking reference '%s'",
618 ref_file.buf, refname);
619 goto error_return;
620 }
621 }
622 } else if (errno == EINVAL && (*type & REF_ISBROKEN)) {
623 strbuf_addf(err, "unable to resolve reference '%s': "
624 "reference broken", refname);
625 goto error_return;
626 } else {
627 strbuf_addf(err, "unable to resolve reference '%s': %s",
628 refname, strerror(errno));
629 goto error_return;
630 }
631
632 /*
633 * If the ref did not exist and we are creating it,
634 * make sure there is no existing packed ref that
635 * conflicts with refname:
636 */
637 if (refs_verify_refname_available(
638 refs->packed_ref_store, refname,
639 extras, skip, err))
640 goto error_return;
641 }
642
643 ret = 0;
644 goto out;
645
646 error_return:
647 unlock_ref(lock);
648 *lock_p = NULL;
649
650 out:
651 strbuf_release(&ref_file);
652 return ret;
653 }
654
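/*
 * A minimal usage sketch of the function above (assumed caller inside this
 * backend, not part of the original file); error handling abbreviated:
 *
 *   struct ref_lock *lock;
 *   struct strbuf referent = STRBUF_INIT, err = STRBUF_INIT;
 *   unsigned int type;
 *
 *   if (lock_raw_ref(refs, "refs/heads/topic", 0, NULL, NULL,
 *                    &lock, &referent, &type, &err))
 *           error("%s", err.buf);    (lock is NULL on failure)
 *   else
 *           unlock_ref(lock);        (lock->old_oid held the old value)
 *
 *   strbuf_release(&referent);
 *   strbuf_release(&err);
 */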
655 static int files_peel_ref(struct ref_store *ref_store,
656 const char *refname, unsigned char *sha1)
657 {
658 struct files_ref_store *refs =
659 files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
660 "peel_ref");
661 int flag;
662 unsigned char base[20];
663
664 if (current_ref_iter && current_ref_iter->refname == refname) {
665 struct object_id peeled;
666
667 if (ref_iterator_peel(current_ref_iter, &peeled))
668 return -1;
669 hashcpy(sha1, peeled.hash);
670 return 0;
671 }
672
673 if (refs_read_ref_full(ref_store, refname,
674 RESOLVE_REF_READING, base, &flag))
675 return -1;
676
677 /*
678 * If the reference is packed, read its ref_entry from the
679 * cache in the hope that we already know its peeled value.
680 * We only try this optimization on packed references because
681 * (a) forcing the filling of the loose reference cache could
682 * be expensive and (b) loose references anyway usually do not
683 * have REF_KNOWS_PEELED.
684 */
685 if (flag & REF_ISPACKED &&
686 !refs_peel_ref(refs->packed_ref_store, refname, sha1))
687 return 0;
688
689 return peel_object(base, sha1);
690 }
691
692 struct files_ref_iterator {
693 struct ref_iterator base;
694
695 struct ref_iterator *iter0;
696 unsigned int flags;
697 };
698
699 static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
700 {
701 struct files_ref_iterator *iter =
702 (struct files_ref_iterator *)ref_iterator;
703 int ok;
704
705 while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
706 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
707 ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
708 continue;
709
710 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
711 !ref_resolves_to_object(iter->iter0->refname,
712 iter->iter0->oid,
713 iter->iter0->flags))
714 continue;
715
716 iter->base.refname = iter->iter0->refname;
717 iter->base.oid = iter->iter0->oid;
718 iter->base.flags = iter->iter0->flags;
719 return ITER_OK;
720 }
721
722 iter->iter0 = NULL;
723 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
724 ok = ITER_ERROR;
725
726 return ok;
727 }
728
729 static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
730 struct object_id *peeled)
731 {
732 struct files_ref_iterator *iter =
733 (struct files_ref_iterator *)ref_iterator;
734
735 return ref_iterator_peel(iter->iter0, peeled);
736 }
737
738 static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
739 {
740 struct files_ref_iterator *iter =
741 (struct files_ref_iterator *)ref_iterator;
742 int ok = ITER_DONE;
743
744 if (iter->iter0)
745 ok = ref_iterator_abort(iter->iter0);
746
747 base_ref_iterator_free(ref_iterator);
748 return ok;
749 }
750
751 static struct ref_iterator_vtable files_ref_iterator_vtable = {
752 files_ref_iterator_advance,
753 files_ref_iterator_peel,
754 files_ref_iterator_abort
755 };
756
757 static struct ref_iterator *files_ref_iterator_begin(
758 struct ref_store *ref_store,
759 const char *prefix, unsigned int flags)
760 {
761 struct files_ref_store *refs;
762 struct ref_iterator *loose_iter, *packed_iter;
763 struct files_ref_iterator *iter;
764 struct ref_iterator *ref_iterator;
765 unsigned int required_flags = REF_STORE_READ;
766
767 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
768 required_flags |= REF_STORE_ODB;
769
770 refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
771
772 iter = xcalloc(1, sizeof(*iter));
773 ref_iterator = &iter->base;
774 base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
775
776 /*
777 * We must make sure that all loose refs are read before
778 * accessing the packed-refs file; this avoids a race
779 * condition in which loose refs are migrated to the packed-refs
780 * file by a simultaneous process, but our in-memory view is
781 * from before the migration. We ensure this as follows:
782 * First, we start the loose refs iteration with its
783 * `prime_ref` argument set to true. This causes the loose
784 * references in the subtree to be pre-read into the cache.
785 * (If they've already been read, that's OK; we only need to
786 * guarantee that they're read before the packed refs, not
787 * *how much* before.) After that, we call
788 * packed_ref_iterator_begin(), which internally checks
789 * whether the packed-ref cache is up to date with what is on
790 * disk, and re-reads it if not.
791 */
792
793 loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
794 prefix, 1);
795
796 /*
797 * The packed-refs file might contain broken references, for
798 * example an old version of a reference that points at an
799 * object that has since been garbage-collected. This is OK as
800 * long as there is a corresponding loose reference that
801 * overrides it, and we don't want to emit an error message in
802 * this case. So ask the packed_ref_store for all of its
803 * references, and (if needed) do our own check for broken
804 * ones in files_ref_iterator_advance(), after we have merged
805 * the packed and loose references.
806 */
807 packed_iter = refs_ref_iterator_begin(
808 refs->packed_ref_store, prefix, 0,
809 DO_FOR_EACH_INCLUDE_BROKEN);
810
811 iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
812 iter->flags = flags;
813
814 return ref_iterator;
815 }
816
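/*
 * A concrete (hypothetical) example of the overlay set up above: if the loose
 * ref cache knows "refs/heads/master" -> A while the packed-refs file still
 * has the stale entries "refs/heads/master" -> B and "refs/tags/v1.0" -> C,
 * the merged iteration yields
 *
 *   refs/heads/master  A   (the loose entry shadows the packed one)
 *   refs/tags/v1.0     C   (only present in packed-refs)
 *
 * so a broken packed entry that is shadowed by a loose ref is never reported.
 */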
817 /*
818 * Verify that the reference locked by lock has the value old_sha1.
819 * Fail if the reference doesn't exist and mustexist is set. Return 0
820 * on success. On error, write an error message to err, set errno, and
821 * return a negative value.
822 */
823 static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
824 const unsigned char *old_sha1, int mustexist,
825 struct strbuf *err)
826 {
827 assert(err);
828
829 if (refs_read_ref_full(ref_store, lock->ref_name,
830 mustexist ? RESOLVE_REF_READING : 0,
831 lock->old_oid.hash, NULL)) {
832 if (old_sha1) {
833 int save_errno = errno;
834 strbuf_addf(err, "can't verify ref '%s'", lock->ref_name);
835 errno = save_errno;
836 return -1;
837 } else {
838 oidclr(&lock->old_oid);
839 return 0;
840 }
841 }
842 if (old_sha1 && hashcmp(lock->old_oid.hash, old_sha1)) {
843 strbuf_addf(err, "ref '%s' is at %s but expected %s",
844 lock->ref_name,
845 oid_to_hex(&lock->old_oid),
846 sha1_to_hex(old_sha1));
847 errno = EBUSY;
848 return -1;
849 }
850 return 0;
851 }
852
853 static int remove_empty_directories(struct strbuf *path)
854 {
855 /*
856 * we want to create a file but there is a directory there;
857 * if that is an empty directory (or a directory that contains
858 * only empty directories), remove them.
859 */
860 return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);
861 }
862
863 static int create_reflock(const char *path, void *cb)
864 {
865 struct lock_file *lk = cb;
866
867 return hold_lock_file_for_update(lk, path, LOCK_NO_DEREF) < 0 ? -1 : 0;
868 }
869
870 /*
871 * Locks a ref returning the lock on success and NULL on failure.
872 * On failure errno is set to something meaningful.
873 */
874 static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs,
875 const char *refname,
876 const unsigned char *old_sha1,
877 const struct string_list *extras,
878 const struct string_list *skip,
879 unsigned int flags, int *type,
880 struct strbuf *err)
881 {
882 struct strbuf ref_file = STRBUF_INIT;
883 struct ref_lock *lock;
884 int last_errno = 0;
885 int mustexist = (old_sha1 && !is_null_sha1(old_sha1));
886 int resolve_flags = RESOLVE_REF_NO_RECURSE;
887 int resolved;
888
889 files_assert_main_repository(refs, "lock_ref_sha1_basic");
890 assert(err);
891
892 lock = xcalloc(1, sizeof(struct ref_lock));
893
894 if (mustexist)
895 resolve_flags |= RESOLVE_REF_READING;
896 if (flags & REF_DELETING)
897 resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME;
898
899 files_ref_path(refs, &ref_file, refname);
900 resolved = !!refs_resolve_ref_unsafe(&refs->base,
901 refname, resolve_flags,
902 lock->old_oid.hash, type);
903 if (!resolved && errno == EISDIR) {
904 /*
905 * we are trying to lock foo but we used to
906 * have foo/bar which now does not exist;
907 * it is normal for the empty directory 'foo'
908 * to remain.
909 */
910 if (remove_empty_directories(&ref_file)) {
911 last_errno = errno;
912 if (!refs_verify_refname_available(
913 &refs->base,
914 refname, extras, skip, err))
915 strbuf_addf(err, "there are still refs under '%s'",
916 refname);
917 goto error_return;
918 }
919 resolved = !!refs_resolve_ref_unsafe(&refs->base,
920 refname, resolve_flags,
921 lock->old_oid.hash, type);
922 }
923 if (!resolved) {
924 last_errno = errno;
925 if (last_errno != ENOTDIR ||
926 !refs_verify_refname_available(&refs->base, refname,
927 extras, skip, err))
928 strbuf_addf(err, "unable to resolve reference '%s': %s",
929 refname, strerror(last_errno));
930
931 goto error_return;
932 }
933
934 /*
935 * If the ref did not exist and we are creating it, make sure
936 * there is no existing packed ref whose name begins with our
937 * refname, nor a packed ref whose name is a proper prefix of
938 * our refname.
939 */
940 if (is_null_oid(&lock->old_oid) &&
941 refs_verify_refname_available(refs->packed_ref_store, refname,
942 extras, skip, err)) {
943 last_errno = ENOTDIR;
944 goto error_return;
945 }
946
947 lock->lk = xcalloc(1, sizeof(struct lock_file));
948
949 lock->ref_name = xstrdup(refname);
950
951 if (raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) {
952 last_errno = errno;
953 unable_to_lock_message(ref_file.buf, errno, err);
954 goto error_return;
955 }
956
957 if (verify_lock(&refs->base, lock, old_sha1, mustexist, err)) {
958 last_errno = errno;
959 goto error_return;
960 }
961 goto out;
962
963 error_return:
964 unlock_ref(lock);
965 lock = NULL;
966
967 out:
968 strbuf_release(&ref_file);
969 errno = last_errno;
970 return lock;
971 }
972
973 struct ref_to_prune {
974 struct ref_to_prune *next;
975 unsigned char sha1[20];
976 char name[FLEX_ARRAY];
977 };
978
979 enum {
980 REMOVE_EMPTY_PARENTS_REF = 0x01,
981 REMOVE_EMPTY_PARENTS_REFLOG = 0x02
982 };
983
984 /*
985 * Remove empty parent directories associated with the specified
986 * reference and/or its reflog, but spare [logs/]refs/ and immediate
987 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
988 * REMOVE_EMPTY_PARENTS_REFLOG.
989 */
990 static void try_remove_empty_parents(struct files_ref_store *refs,
991 const char *refname,
992 unsigned int flags)
993 {
994 struct strbuf buf = STRBUF_INIT;
995 struct strbuf sb = STRBUF_INIT;
996 char *p, *q;
997 int i;
998
999 strbuf_addstr(&buf, refname);
1000 p = buf.buf;
1001 for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
1002 while (*p && *p != '/')
1003 p++;
1004 /* tolerate duplicate slashes; see check_refname_format() */
1005 while (*p == '/')
1006 p++;
1007 }
1008 q = buf.buf + buf.len;
1009 while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
1010 while (q > p && *q != '/')
1011 q--;
1012 while (q > p && *(q-1) == '/')
1013 q--;
1014 if (q == p)
1015 break;
1016 strbuf_setlen(&buf, q - buf.buf);
1017
1018 strbuf_reset(&sb);
1019 files_ref_path(refs, &sb, buf.buf);
1020 if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
1021 flags &= ~REMOVE_EMPTY_PARENTS_REF;
1022
1023 strbuf_reset(&sb);
1024 files_reflog_path(refs, &sb, buf.buf);
1025 if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
1026 flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
1027 }
1028 strbuf_release(&buf);
1029 strbuf_release(&sb);
1030 }
1031
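/*
 * Illustration (assumed layout, not from the original source): after the
 * loose ref "refs/heads/topic/a/b" has been deleted,
 *
 *   try_remove_empty_parents(refs, "refs/heads/topic/a/b",
 *                            REMOVE_EMPTY_PARENTS_REF);
 *
 * tries rmdir() on ".../refs/heads/topic/a" and then ".../refs/heads/topic",
 * stopping at the first directory that turns out not to be empty, and never
 * touches "refs/heads" or "refs/" themselves because the first two path
 * components are skipped up front.
 */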
1032 /* make sure nobody touched the ref, and unlink */
1033 static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
1034 {
1035 struct ref_transaction *transaction;
1036 struct strbuf err = STRBUF_INIT;
1037
1038 if (check_refname_format(r->name, 0))
1039 return;
1040
1041 transaction = ref_store_transaction_begin(&refs->base, &err);
1042 if (!transaction ||
1043 ref_transaction_delete(transaction, r->name, r->sha1,
1044 REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
1045 ref_transaction_commit(transaction, &err)) {
1046 ref_transaction_free(transaction);
1047 error("%s", err.buf);
1048 strbuf_release(&err);
1049 return;
1050 }
1051 ref_transaction_free(transaction);
1052 strbuf_release(&err);
1053 }
1054
1055 static void prune_refs(struct files_ref_store *refs, struct ref_to_prune *r)
1056 {
1057 while (r) {
1058 prune_ref(refs, r);
1059 r = r->next;
1060 }
1061 }
1062
1063 /*
1064 * Return true if the specified reference should be packed.
1065 */
1066 static int should_pack_ref(const char *refname,
1067 const struct object_id *oid, unsigned int ref_flags,
1068 unsigned int pack_flags)
1069 {
1070 /* Do not pack per-worktree refs: */
1071 if (ref_type(refname) != REF_TYPE_NORMAL)
1072 return 0;
1073
1074 /* Do not pack non-tags unless PACK_REFS_ALL is set: */
1075 if (!(pack_flags & PACK_REFS_ALL) && !starts_with(refname, "refs/tags/"))
1076 return 0;
1077
1078 /* Do not pack symbolic refs: */
1079 if (ref_flags & REF_ISSYMREF)
1080 return 0;
1081
1082 /* Do not pack broken refs: */
1083 if (!ref_resolves_to_object(refname, oid, ref_flags))
1084 return 0;
1085
1086 return 1;
1087 }
1088
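/*
 * Hypothetical examples of the policy above, assuming pack_flags does not
 * include PACK_REFS_ALL:
 *
 *   refs/tags/v1.0     -> packed (a tag that resolves to an object)
 *   refs/heads/master  -> not packed (not under refs/tags/, PACK_REFS_ALL unset)
 *   refs/bisect/bad    -> never packed (per-worktree ref)
 *   refs/tags/dangling -> not packed if it no longer resolves to an object
 */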
1089 static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
1090 {
1091 struct files_ref_store *refs =
1092 files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
1093 "pack_refs");
1094 struct ref_iterator *iter;
1095 int ok;
1096 struct ref_to_prune *refs_to_prune = NULL;
1097 struct strbuf err = STRBUF_INIT;
1098
1099 packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
1100
1101 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL, 0);
1102 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
1103 /*
1104 * If the loose reference can be packed, add an entry
1105 * in the packed ref cache. If the reference should be
1106 * pruned, also add it to refs_to_prune.
1107 */
1108 if (!should_pack_ref(iter->refname, iter->oid, iter->flags,
1109 flags))
1110 continue;
1111
1112 /*
1113 * Create an entry in the packed-refs cache equivalent
1114 * to the one from the loose ref cache, except that
1115 * we don't copy the peeled status, because we want it
1116 * to be re-peeled.
1117 */
1118 add_packed_ref(refs->packed_ref_store, iter->refname, iter->oid);
1119
1120 /* Schedule the loose reference for pruning if requested. */
1121 if ((flags & PACK_REFS_PRUNE)) {
1122 struct ref_to_prune *n;
1123 FLEX_ALLOC_STR(n, name, iter->refname);
1124 hashcpy(n->sha1, iter->oid->hash);
1125 n->next = refs_to_prune;
1126 refs_to_prune = n;
1127 }
1128 }
1129 if (ok != ITER_DONE)
1130 die("error while iterating over references");
1131
1132 if (commit_packed_refs(refs->packed_ref_store, &err))
1133 die("unable to overwrite old ref-pack file: %s", err.buf);
1134 packed_refs_unlock(refs->packed_ref_store);
1135
1136 prune_refs(refs, refs_to_prune);
1137 strbuf_release(&err);
1138 return 0;
1139 }
1140
1141 static int files_delete_refs(struct ref_store *ref_store, const char *msg,
1142 struct string_list *refnames, unsigned int flags)
1143 {
1144 struct files_ref_store *refs =
1145 files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1146 struct strbuf err = STRBUF_INIT;
1147 int i, result = 0;
1148
1149 if (!refnames->nr)
1150 return 0;
1151
1152 if (packed_refs_lock(refs->packed_ref_store, 0, &err))
1153 goto error;
1154
1155 if (repack_without_refs(refs->packed_ref_store, refnames, &err)) {
1156 packed_refs_unlock(refs->packed_ref_store);
1157 goto error;
1158 }
1159
1160 packed_refs_unlock(refs->packed_ref_store);
1161
1162 for (i = 0; i < refnames->nr; i++) {
1163 const char *refname = refnames->items[i].string;
1164
1165 if (refs_delete_ref(&refs->base, msg, refname, NULL, flags))
1166 result |= error(_("could not remove reference %s"), refname);
1167 }
1168
1169 strbuf_release(&err);
1170 return result;
1171
1172 error:
1173 /*
1174 * If we failed to rewrite the packed-refs file, then it is
1175 * unsafe to try to remove loose refs, because doing so might
1176 * expose an obsolete packed value for a reference that might
1177 * even point at an object that has been garbage collected.
1178 */
1179 if (refnames->nr == 1)
1180 error(_("could not delete reference %s: %s"),
1181 refnames->items[0].string, err.buf);
1182 else
1183 error(_("could not delete references: %s"), err.buf);
1184
1185 strbuf_release(&err);
1186 return -1;
1187 }
1188
1189 /*
1190 * People using contrib's git-new-workdir have .git/logs/refs ->
1191 * /some/other/path/.git/logs/refs, and that may live on another device.
1192 *
1193 * IOW, to avoid cross-device rename errors, the temporary renamed log must
1194 * live within logs/refs.
1195 */
1196 #define TMP_RENAMED_LOG "refs/.tmp-renamed-log"
1197
1198 struct rename_cb {
1199 const char *tmp_renamed_log;
1200 int true_errno;
1201 };
1202
1203 static int rename_tmp_log_callback(const char *path, void *cb_data)
1204 {
1205 struct rename_cb *cb = cb_data;
1206
1207 if (rename(cb->tmp_renamed_log, path)) {
1208 /*
1209 * rename(a, b) when b is an existing directory ought
1210 * to result in EISDIR, but Solaris 5.8 gives ENOTDIR.
1211 * Sheesh. Record the true errno for error reporting,
1212 * but report EISDIR to raceproof_create_file() so
1213 * that it knows to retry.
1214 */
1215 cb->true_errno = errno;
1216 if (errno == ENOTDIR)
1217 errno = EISDIR;
1218 return -1;
1219 } else {
1220 return 0;
1221 }
1222 }
1223
1224 static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
1225 {
1226 struct strbuf path = STRBUF_INIT;
1227 struct strbuf tmp = STRBUF_INIT;
1228 struct rename_cb cb;
1229 int ret;
1230
1231 files_reflog_path(refs, &path, newrefname);
1232 files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
1233 cb.tmp_renamed_log = tmp.buf;
1234 ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
1235 if (ret) {
1236 if (errno == EISDIR)
1237 error("directory not empty: %s", path.buf);
1238 else
1239 error("unable to move logfile %s to %s: %s",
1240 tmp.buf, path.buf,
1241 strerror(cb.true_errno));
1242 }
1243
1244 strbuf_release(&path);
1245 strbuf_release(&tmp);
1246 return ret;
1247 }
1248
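/*
 * Sketch of the reflog dance during a rename (assumed refnames, not from the
 * original source): files_rename_ref() below first moves
 * "logs/refs/heads/old" aside to "logs/refs/.tmp-renamed-log"; after the old
 * ref (and any existing ref at the new name) has been deleted,
 * rename_tmp_log() above moves the saved file into place at
 * "logs/refs/heads/new", retrying via raceproof_create_file() if leading
 * directories first have to be created or removed.
 */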
1249 static int write_ref_to_lockfile(struct ref_lock *lock,
1250 const struct object_id *oid, struct strbuf *err);
1251 static int commit_ref_update(struct files_ref_store *refs,
1252 struct ref_lock *lock,
1253 const struct object_id *oid, const char *logmsg,
1254 struct strbuf *err);
1255
1256 static int files_rename_ref(struct ref_store *ref_store,
1257 const char *oldrefname, const char *newrefname,
1258 const char *logmsg)
1259 {
1260 struct files_ref_store *refs =
1261 files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1262 struct object_id oid, orig_oid;
1263 int flag = 0, logmoved = 0;
1264 struct ref_lock *lock;
1265 struct stat loginfo;
1266 struct strbuf sb_oldref = STRBUF_INIT;
1267 struct strbuf sb_newref = STRBUF_INIT;
1268 struct strbuf tmp_renamed_log = STRBUF_INIT;
1269 int log, ret;
1270 struct strbuf err = STRBUF_INIT;
1271
1272 files_reflog_path(refs, &sb_oldref, oldrefname);
1273 files_reflog_path(refs, &sb_newref, newrefname);
1274 files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);
1275
1276 log = !lstat(sb_oldref.buf, &loginfo);
1277 if (log && S_ISLNK(loginfo.st_mode)) {
1278 ret = error("reflog for %s is a symlink", oldrefname);
1279 goto out;
1280 }
1281
1282 if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
1283 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1284 orig_oid.hash, &flag)) {
1285 ret = error("refname %s not found", oldrefname);
1286 goto out;
1287 }
1288
1289 if (flag & REF_ISSYMREF) {
1290 ret = error("refname %s is a symbolic ref, renaming it is not supported",
1291 oldrefname);
1292 goto out;
1293 }
1294 if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
1295 ret = 1;
1296 goto out;
1297 }
1298
1299 if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
1300 ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1301 oldrefname, strerror(errno));
1302 goto out;
1303 }
1304
1305 if (refs_delete_ref(&refs->base, logmsg, oldrefname,
1306 orig_oid.hash, REF_NODEREF)) {
1307 error("unable to delete old %s", oldrefname);
1308 goto rollback;
1309 }
1310
1311 /*
1312 * Since we are doing a shallow lookup, oid is not the
1313 * correct value to pass to delete_ref as old_oid. But that
1314 * doesn't matter, because an old_oid check wouldn't add to
1315 * the safety anyway; we want to delete the reference whatever
1316 * its current value.
1317 */
1318 if (!refs_read_ref_full(&refs->base, newrefname,
1319 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1320 oid.hash, NULL) &&
1321 refs_delete_ref(&refs->base, NULL, newrefname,
1322 NULL, REF_NODEREF)) {
1323 if (errno == EISDIR) {
1324 struct strbuf path = STRBUF_INIT;
1325 int result;
1326
1327 files_ref_path(refs, &path, newrefname);
1328 result = remove_empty_directories(&path);
1329 strbuf_release(&path);
1330
1331 if (result) {
1332 error("Directory not empty: %s", newrefname);
1333 goto rollback;
1334 }
1335 } else {
1336 error("unable to delete existing %s", newrefname);
1337 goto rollback;
1338 }
1339 }
1340
1341 if (log && rename_tmp_log(refs, newrefname))
1342 goto rollback;
1343
1344 logmoved = log;
1345
1346 lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
1347 REF_NODEREF, NULL, &err);
1348 if (!lock) {
1349 error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1350 strbuf_release(&err);
1351 goto rollback;
1352 }
1353 oidcpy(&lock->old_oid, &orig_oid);
1354
1355 if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
1356 commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
1357 error("unable to write current sha1 into %s: %s", newrefname, err.buf);
1358 strbuf_release(&err);
1359 goto rollback;
1360 }
1361
1362 ret = 0;
1363 goto out;
1364
1365 rollback:
1366 lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL,
1367 REF_NODEREF, NULL, &err);
1368 if (!lock) {
1369 error("unable to lock %s for rollback: %s", oldrefname, err.buf);
1370 strbuf_release(&err);
1371 goto rollbacklog;
1372 }
1373
1374 flag = log_all_ref_updates;
1375 log_all_ref_updates = LOG_REFS_NONE;
1376 if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
1377 commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
1378 error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
1379 strbuf_release(&err);
1380 }
1381 log_all_ref_updates = flag;
1382
1383 rollbacklog:
1384 if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
1385 error("unable to restore logfile %s from %s: %s",
1386 oldrefname, newrefname, strerror(errno));
1387 if (!logmoved && log &&
1388 rename(tmp_renamed_log.buf, sb_oldref.buf))
1389 error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
1390 oldrefname, strerror(errno));
1391 ret = 1;
1392 out:
1393 strbuf_release(&sb_newref);
1394 strbuf_release(&sb_oldref);
1395 strbuf_release(&tmp_renamed_log);
1396
1397 return ret;
1398 }
1399
1400 static int close_ref(struct ref_lock *lock)
1401 {
1402 if (close_lock_file(lock->lk))
1403 return -1;
1404 return 0;
1405 }
1406
1407 static int commit_ref(struct ref_lock *lock)
1408 {
1409 char *path = get_locked_file_path(lock->lk);
1410 struct stat st;
1411
1412 if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
1413 /*
1414 * There is a directory at the path we want to rename
1415 * the lockfile to. Hopefully it is empty; try to
1416 * delete it.
1417 */
1418 size_t len = strlen(path);
1419 struct strbuf sb_path = STRBUF_INIT;
1420
1421 strbuf_attach(&sb_path, path, len, len);
1422
1423 /*
1424 * If this fails, commit_lock_file() will also fail
1425 * and will report the problem.
1426 */
1427 remove_empty_directories(&sb_path);
1428 strbuf_release(&sb_path);
1429 } else {
1430 free(path);
1431 }
1432
1433 if (commit_lock_file(lock->lk))
1434 return -1;
1435 return 0;
1436 }
1437
1438 static int open_or_create_logfile(const char *path, void *cb)
1439 {
1440 int *fd = cb;
1441
1442 *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
1443 return (*fd < 0) ? -1 : 0;
1444 }
1445
1446 /*
1447 * Create a reflog for a ref. If force_create = 0, only create the
1448 * reflog for certain refs (those for which should_autocreate_reflog
1449 * returns non-zero). Otherwise, create it regardless of the reference
1450 * name. If the logfile already existed or was created, return 0 and
1451 * set *logfd to the file descriptor opened for appending to the file.
1452 * If no logfile exists and we decided not to create one, return 0 and
1453 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
1454 * return -1.
1455 */
1456 static int log_ref_setup(struct files_ref_store *refs,
1457 const char *refname, int force_create,
1458 int *logfd, struct strbuf *err)
1459 {
1460 struct strbuf logfile_sb = STRBUF_INIT;
1461 char *logfile;
1462
1463 files_reflog_path(refs, &logfile_sb, refname);
1464 logfile = strbuf_detach(&logfile_sb, NULL);
1465
1466 if (force_create || should_autocreate_reflog(refname)) {
1467 if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
1468 if (errno == ENOENT)
1469 strbuf_addf(err, "unable to create directory for '%s': "
1470 "%s", logfile, strerror(errno));
1471 else if (errno == EISDIR)
1472 strbuf_addf(err, "there are still logs under '%s'",
1473 logfile);
1474 else
1475 strbuf_addf(err, "unable to append to '%s': %s",
1476 logfile, strerror(errno));
1477
1478 goto error;
1479 }
1480 } else {
1481 *logfd = open(logfile, O_APPEND | O_WRONLY, 0666);
1482 if (*logfd < 0) {
1483 if (errno == ENOENT || errno == EISDIR) {
1484 /*
1485 * The logfile doesn't already exist,
1486 * but that is not an error; it only
1487 * means that we won't write log
1488 * entries to it.
1489 */
1490 ;
1491 } else {
1492 strbuf_addf(err, "unable to append to '%s': %s",
1493 logfile, strerror(errno));
1494 goto error;
1495 }
1496 }
1497 }
1498
1499 if (*logfd >= 0)
1500 adjust_shared_perm(logfile);
1501
1502 free(logfile);
1503 return 0;
1504
1505 error:
1506 free(logfile);
1507 return -1;
1508 }
1509
1510 static int files_create_reflog(struct ref_store *ref_store,
1511 const char *refname, int force_create,
1512 struct strbuf *err)
1513 {
1514 struct files_ref_store *refs =
1515 files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1516 int fd;
1517
1518 if (log_ref_setup(refs, refname, force_create, &fd, err))
1519 return -1;
1520
1521 if (fd >= 0)
1522 close(fd);
1523
1524 return 0;
1525 }
1526
1527 static int log_ref_write_fd(int fd, const struct object_id *old_oid,
1528 const struct object_id *new_oid,
1529 const char *committer, const char *msg)
1530 {
1531 int msglen, written;
1532 unsigned maxlen, len;
1533 char *logrec;
1534
1535 msglen = msg ? strlen(msg) : 0;
1536 maxlen = strlen(committer) + msglen + 100;
1537 logrec = xmalloc(maxlen);
1538 len = xsnprintf(logrec, maxlen, "%s %s %s\n",
1539 oid_to_hex(old_oid),
1540 oid_to_hex(new_oid),
1541 committer);
1542 if (msglen)
1543 len += copy_reflog_msg(logrec + len - 1, msg) - 1;
1544
1545 written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
1546 free(logrec);
1547 if (written != len)
1548 return -1;
1549
1550 return 0;
1551 }
1552
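/*
 * For illustration (example values, not from the original file), a record
 * written by the function above has the shape
 *
 *   <old-sha1> SP <new-sha1> SP name SP <email> SP timestamp SP tz TAB msg LF
 *
 * for instance:
 *
 *   3f78...e2a1 89e4...ce6b A U Thor <author@example.com> 1500000000 +0200 [TAB] checkout: moving from master to topic
 *
 * copy_reflog_msg() supplies the leading TAB and flattens the message to a
 * single line before the terminating LF.
 */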
1553 static int files_log_ref_write(struct files_ref_store *refs,
1554 const char *refname, const struct object_id *old_oid,
1555 const struct object_id *new_oid, const char *msg,
1556 int flags, struct strbuf *err)
1557 {
1558 int logfd, result;
1559
1560 if (log_all_ref_updates == LOG_REFS_UNSET)
1561 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
1562
1563 result = log_ref_setup(refs, refname,
1564 flags & REF_FORCE_CREATE_REFLOG,
1565 &logfd, err);
1566
1567 if (result)
1568 return result;
1569
1570 if (logfd < 0)
1571 return 0;
1572 result = log_ref_write_fd(logfd, old_oid, new_oid,
1573 git_committer_info(0), msg);
1574 if (result) {
1575 struct strbuf sb = STRBUF_INIT;
1576 int save_errno = errno;
1577
1578 files_reflog_path(refs, &sb, refname);
1579 strbuf_addf(err, "unable to append to '%s': %s",
1580 sb.buf, strerror(save_errno));
1581 strbuf_release(&sb);
1582 close(logfd);
1583 return -1;
1584 }
1585 if (close(logfd)) {
1586 struct strbuf sb = STRBUF_INIT;
1587 int save_errno = errno;
1588
1589 files_reflog_path(refs, &sb, refname);
1590 strbuf_addf(err, "unable to append to '%s': %s",
1591 sb.buf, strerror(save_errno));
1592 strbuf_release(&sb);
1593 return -1;
1594 }
1595 return 0;
1596 }
1597
1598 /*
1599 * Write sha1 into the open lockfile, then close the lockfile. On
1600 * errors, rollback the lockfile, fill in *err and
1601 * return -1.
1602 */
1603 static int write_ref_to_lockfile(struct ref_lock *lock,
1604 const struct object_id *oid, struct strbuf *err)
1605 {
1606 static char term = '\n';
1607 struct object *o;
1608 int fd;
1609
1610 o = parse_object(oid);
1611 if (!o) {
1612 strbuf_addf(err,
1613 "trying to write ref '%s' with nonexistent object %s",
1614 lock->ref_name, oid_to_hex(oid));
1615 unlock_ref(lock);
1616 return -1;
1617 }
1618 if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
1619 strbuf_addf(err,
1620 "trying to write non-commit object %s to branch '%s'",
1621 oid_to_hex(oid), lock->ref_name);
1622 unlock_ref(lock);
1623 return -1;
1624 }
1625 fd = get_lock_file_fd(lock->lk);
1626 if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
1627 write_in_full(fd, &term, 1) != 1 ||
1628 close_ref(lock) < 0) {
1629 strbuf_addf(err,
1630 "couldn't write '%s'", get_lock_file_path(lock->lk));
1631 unlock_ref(lock);
1632 return -1;
1633 }
1634 return 0;
1635 }
1636
1637 /*
1638 * Commit a change to a loose reference that has already been written
1639 * to the loose reference lockfile. Also update the reflogs if
1640 * necessary, using the specified logmsg (which can be NULL).
1641 */
1642 static int commit_ref_update(struct files_ref_store *refs,
1643 struct ref_lock *lock,
1644 const struct object_id *oid, const char *logmsg,
1645 struct strbuf *err)
1646 {
1647 files_assert_main_repository(refs, "commit_ref_update");
1648
1649 clear_loose_ref_cache(refs);
1650 if (files_log_ref_write(refs, lock->ref_name,
1651 &lock->old_oid, oid,
1652 logmsg, 0, err)) {
1653 char *old_msg = strbuf_detach(err, NULL);
1654 strbuf_addf(err, "cannot update the ref '%s': %s",
1655 lock->ref_name, old_msg);
1656 free(old_msg);
1657 unlock_ref(lock);
1658 return -1;
1659 }
1660
1661 if (strcmp(lock->ref_name, "HEAD") != 0) {
1662 /*
1663 * Special hack: If a branch is updated directly and HEAD
1664 * points to it (may happen on the remote side of a push
1665 * for example) then logically the HEAD reflog should be
1666 * updated too.
1667 * A generic solution implies reverse symref information,
1668 * but finding all symrefs pointing to the given branch
1669 * would be rather costly for this rare event (the direct
1670 * update of a branch) to be worth it. So let's cheat and
1671 * check with HEAD only which should cover 99% of all usage
1672 * scenarios (even 100% of the default ones).
1673 */
1674 struct object_id head_oid;
1675 int head_flag;
1676 const char *head_ref;
1677
1678 head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
1679 RESOLVE_REF_READING,
1680 head_oid.hash, &head_flag);
1681 if (head_ref && (head_flag & REF_ISSYMREF) &&
1682 !strcmp(head_ref, lock->ref_name)) {
1683 struct strbuf log_err = STRBUF_INIT;
1684 if (files_log_ref_write(refs, "HEAD",
1685 &lock->old_oid, oid,
1686 logmsg, 0, &log_err)) {
1687 error("%s", log_err.buf);
1688 strbuf_release(&log_err);
1689 }
1690 }
1691 }
1692
1693 if (commit_ref(lock)) {
1694 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
1695 unlock_ref(lock);
1696 return -1;
1697 }
1698
1699 unlock_ref(lock);
1700 return 0;
1701 }
1702
1703 static int create_ref_symlink(struct ref_lock *lock, const char *target)
1704 {
1705 int ret = -1;
1706 #ifndef NO_SYMLINK_HEAD
1707 char *ref_path = get_locked_file_path(lock->lk);
1708 unlink(ref_path);
1709 ret = symlink(target, ref_path);
1710 free(ref_path);
1711
1712 if (ret)
1713 fprintf(stderr, "no symlink - falling back to symbolic ref\n");
1714 #endif
1715 return ret;
1716 }
1717
1718 static void update_symref_reflog(struct files_ref_store *refs,
1719 struct ref_lock *lock, const char *refname,
1720 const char *target, const char *logmsg)
1721 {
1722 struct strbuf err = STRBUF_INIT;
1723 struct object_id new_oid;
1724 if (logmsg &&
1725 !refs_read_ref_full(&refs->base, target,
1726 RESOLVE_REF_READING, new_oid.hash, NULL) &&
1727 files_log_ref_write(refs, refname, &lock->old_oid,
1728 &new_oid, logmsg, 0, &err)) {
1729 error("%s", err.buf);
1730 strbuf_release(&err);
1731 }
1732 }
1733
1734 static int create_symref_locked(struct files_ref_store *refs,
1735 struct ref_lock *lock, const char *refname,
1736 const char *target, const char *logmsg)
1737 {
1738 if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
1739 update_symref_reflog(refs, lock, refname, target, logmsg);
1740 return 0;
1741 }
1742
1743 if (!fdopen_lock_file(lock->lk, "w"))
1744 return error("unable to fdopen %s: %s",
1745 lock->lk->tempfile.filename.buf, strerror(errno));
1746
1747 update_symref_reflog(refs, lock, refname, target, logmsg);
1748
1749 /* no error check; commit_ref will check ferror */
1750 fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
1751 if (commit_ref(lock) < 0)
1752 return error("unable to write symref for %s: %s", refname,
1753 strerror(errno));
1754 return 0;
1755 }
1756
1757 static int files_create_symref(struct ref_store *ref_store,
1758 const char *refname, const char *target,
1759 const char *logmsg)
1760 {
1761 struct files_ref_store *refs =
1762 files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1763 struct strbuf err = STRBUF_INIT;
1764 struct ref_lock *lock;
1765 int ret;
1766
1767 lock = lock_ref_sha1_basic(refs, refname, NULL,
1768 NULL, NULL, REF_NODEREF, NULL,
1769 &err);
1770 if (!lock) {
1771 error("%s", err.buf);
1772 strbuf_release(&err);
1773 return -1;
1774 }
1775
1776 ret = create_symref_locked(refs, lock, refname, target, logmsg);
1777 unlock_ref(lock);
1778 return ret;
1779 }
1780
1781 static int files_reflog_exists(struct ref_store *ref_store,
1782 const char *refname)
1783 {
1784 struct files_ref_store *refs =
1785 files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1786 struct strbuf sb = STRBUF_INIT;
1787 struct stat st;
1788 int ret;
1789
1790 files_reflog_path(refs, &sb, refname);
1791 ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
1792 strbuf_release(&sb);
1793 return ret;
1794 }
1795
1796 static int files_delete_reflog(struct ref_store *ref_store,
1797 const char *refname)
1798 {
1799 struct files_ref_store *refs =
1800 files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1801 struct strbuf sb = STRBUF_INIT;
1802 int ret;
1803
1804 files_reflog_path(refs, &sb, refname);
1805 ret = remove_path(sb.buf);
1806 strbuf_release(&sb);
1807 return ret;
1808 }
1809
1810 static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
1811 {
1812 struct object_id ooid, noid;
1813 char *email_end, *message;
1814 timestamp_t timestamp;
1815 int tz;
1816 const char *p = sb->buf;
1817
1818 /* old SP new SP name <email> SP time TAB msg LF */
1819 if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
1820 parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
1821 parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
1822 !(email_end = strchr(p, '>')) ||
1823 email_end[1] != ' ' ||
1824 !(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
1825 !message || message[0] != ' ' ||
1826 (message[1] != '+' && message[1] != '-') ||
1827 !isdigit(message[2]) || !isdigit(message[3]) ||
1828 !isdigit(message[4]) || !isdigit(message[5]))
1829 return 0; /* corrupt? */
1830 email_end[1] = '\0';
1831 tz = strtol(message + 1, NULL, 10);
1832 if (message[6] != '\t')
1833 message += 6;
1834 else
1835 message += 7;
1836 return fn(&ooid, &noid, p, timestamp, tz, message, cb_data);
1837 }
1838
1839 static char *find_beginning_of_line(char *bob, char *scan)
1840 {
1841 while (bob < scan && *(--scan) != '\n')
1842 ; /* keep scanning backwards */
1843 /*
1844 * Return either beginning of the buffer, or LF at the end of
1845 * the previous line.
1846 */
1847 return scan;
1848 }
1849
1850 static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1851 const char *refname,
1852 each_reflog_ent_fn fn,
1853 void *cb_data)
1854 {
1855 struct files_ref_store *refs =
1856 files_downcast(ref_store, REF_STORE_READ,
1857 "for_each_reflog_ent_reverse");
1858 struct strbuf sb = STRBUF_INIT;
1859 FILE *logfp;
1860 long pos;
1861 int ret = 0, at_tail = 1;
1862
1863 files_reflog_path(refs, &sb, refname);
1864 logfp = fopen(sb.buf, "r");
1865 strbuf_release(&sb);
1866 if (!logfp)
1867 return -1;
1868
1869 /* Jump to the end */
1870 if (fseek(logfp, 0, SEEK_END) < 0)
1871 ret = error("cannot seek back reflog for %s: %s",
1872 refname, strerror(errno));
1873 pos = ftell(logfp);
1874 while (!ret && 0 < pos) {
1875 int cnt;
1876 size_t nread;
1877 char buf[BUFSIZ];
1878 char *endp, *scanp;
1879
1880 /* Fill next block from the end */
1881 cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
1882 if (fseek(logfp, pos - cnt, SEEK_SET)) {
1883 ret = error("cannot seek back reflog for %s: %s",
1884 refname, strerror(errno));
1885 break;
1886 }
1887 nread = fread(buf, cnt, 1, logfp);
1888 if (nread != 1) {
1889 ret = error("cannot read %d bytes from reflog for %s: %s",
1890 cnt, refname, strerror(errno));
1891 break;
1892 }
1893 pos -= cnt;
1894
1895 scanp = endp = buf + cnt;
1896 if (at_tail && scanp[-1] == '\n')
1897 /* Looking at the final LF at the end of the file */
1898 scanp--;
1899 at_tail = 0;
1900
1901 while (buf < scanp) {
1902 /*
1903 * terminating LF of the previous line, or the beginning
1904 * of the buffer.
1905 */
1906 char *bp;
1907
1908 bp = find_beginning_of_line(buf, scanp);
1909
1910 if (*bp == '\n') {
1911 /*
1912 * The newline is the end of the previous line,
1913 * so we know we have complete line starting
1914 * at (bp + 1). Prefix it onto any prior data
1915 * we collected for the line and process it.
1916 */
1917 strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
1918 scanp = bp;
1919 endp = bp + 1;
1920 ret = show_one_reflog_ent(&sb, fn, cb_data);
1921 strbuf_reset(&sb);
1922 if (ret)
1923 break;
1924 } else if (!pos) {
1925 /*
1926 * We are at the start of the buffer, and the
1927 * start of the file; there is no previous
1928 * line, and we have everything for this one.
1929 * Process it, and we can end the loop.
1930 */
1931 strbuf_splice(&sb, 0, 0, buf, endp - buf);
1932 ret = show_one_reflog_ent(&sb, fn, cb_data);
1933 strbuf_reset(&sb);
1934 break;
1935 }
1936
1937 if (bp == buf) {
1938 /*
1939 * We are at the start of the buffer, and there
1940 * is more file to read backwards. Which means
1941 * we are in the middle of a line. Note that we
1942 * may get here even if *bp was a newline; that
1943 * just means we are at the exact end of the
1944 * previous line, rather than some spot in the
1945 * middle.
1946 *
1947 * Save away what we have to be combined with
1948 * the data from the next read.
1949 */
1950 strbuf_splice(&sb, 0, 0, buf, endp - buf);
1951 break;
1952 }
1953 }
1954
1955 }
1956 if (!ret && sb.len)
1957 die("BUG: reverse reflog parser had leftover data");
1958
1959 fclose(logfp);
1960 strbuf_release(&sb);
1961 return ret;
1962 }
1963
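/*
 * Iterate over refname's reflog entries in the order they were
 * written (oldest first). Returns -1 if the log cannot be opened,
 * otherwise the first non-zero value returned by fn, or 0.
 */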
1964 static int files_for_each_reflog_ent(struct ref_store *ref_store,
1965 const char *refname,
1966 each_reflog_ent_fn fn, void *cb_data)
1967 {
1968 struct files_ref_store *refs =
1969 files_downcast(ref_store, REF_STORE_READ,
1970 "for_each_reflog_ent");
1971 FILE *logfp;
1972 struct strbuf sb = STRBUF_INIT;
1973 int ret = 0;
1974
1975 files_reflog_path(refs, &sb, refname);
1976 logfp = fopen(sb.buf, "r");
1977 strbuf_release(&sb);
1978 if (!logfp)
1979 return -1;
1980
1981 while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
1982 ret = show_one_reflog_ent(&sb, fn, cb_data);
1983 fclose(logfp);
1984 strbuf_release(&sb);
1985 return ret;
1986 }
1987
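/*
 * An iterator over all reflogs in a files ref store, implemented by
 * walking the reflog directory (see files_reflog_path()) with a
 * dir_iterator and reporting one entry per regular file found there.
 */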
1988 struct files_reflog_iterator {
1989 struct ref_iterator base;
1990
1991 struct ref_store *ref_store;
1992 struct dir_iterator *dir_iterator;
1993 struct object_id oid;
1994 };
1995
1996 static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1997 {
1998 struct files_reflog_iterator *iter =
1999 (struct files_reflog_iterator *)ref_iterator;
2000 struct dir_iterator *diter = iter->dir_iterator;
2001 int ok;
2002
2003 while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
2004 int flags;
2005
2006 if (!S_ISREG(diter->st.st_mode))
2007 continue;
2008 if (diter->basename[0] == '.')
2009 continue;
2010 if (ends_with(diter->basename, ".lock"))
2011 continue;
2012
2013 if (refs_read_ref_full(iter->ref_store,
2014 diter->relative_path, 0,
2015 iter->oid.hash, &flags)) {
2016 error("bad ref for %s", diter->path.buf);
2017 continue;
2018 }
2019
2020 iter->base.refname = diter->relative_path;
2021 iter->base.oid = &iter->oid;
2022 iter->base.flags = flags;
2023 return ITER_OK;
2024 }
2025
2026 iter->dir_iterator = NULL;
2027 if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
2028 ok = ITER_ERROR;
2029 return ok;
2030 }
2031
2032 static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
2033 struct object_id *peeled)
2034 {
2035 die("BUG: ref_iterator_peel() called for reflog_iterator");
2036 }
2037
2038 static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
2039 {
2040 struct files_reflog_iterator *iter =
2041 (struct files_reflog_iterator *)ref_iterator;
2042 int ok = ITER_DONE;
2043
2044 if (iter->dir_iterator)
2045 ok = dir_iterator_abort(iter->dir_iterator);
2046
2047 base_ref_iterator_free(ref_iterator);
2048 return ok;
2049 }
2050
2051 static struct ref_iterator_vtable files_reflog_iterator_vtable = {
2052 files_reflog_iterator_advance,
2053 files_reflog_iterator_peel,
2054 files_reflog_iterator_abort
2055 };
2056
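/* Begin iterating over all reflogs known to this ref store. */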
2057 static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
2058 {
2059 struct files_ref_store *refs =
2060 files_downcast(ref_store, REF_STORE_READ,
2061 "reflog_iterator_begin");
2062 struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
2063 struct ref_iterator *ref_iterator = &iter->base;
2064 struct strbuf sb = STRBUF_INIT;
2065
2066 base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
2067 files_reflog_path(refs, &sb, NULL);
2068 iter->dir_iterator = dir_iterator_begin(sb.buf);
2069 iter->ref_store = ref_store;
2070 strbuf_release(&sb);
2071 return ref_iterator;
2072 }
2073
2074 /*
2075 * If update is a direct update of head_ref (the reference pointed to
2076 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
2077 */
2078 static int split_head_update(struct ref_update *update,
2079 struct ref_transaction *transaction,
2080 const char *head_ref,
2081 struct string_list *affected_refnames,
2082 struct strbuf *err)
2083 {
2084 struct string_list_item *item;
2085 struct ref_update *new_update;
2086
2087 if ((update->flags & REF_LOG_ONLY) ||
2088 (update->flags & REF_ISPRUNING) ||
2089 (update->flags & REF_UPDATE_VIA_HEAD))
2090 return 0;
2091
2092 if (strcmp(update->refname, head_ref))
2093 return 0;
2094
2095 /*
2096 * First make sure that HEAD is not already in the
2097 * transaction. This insertion is O(N) in the transaction
2098 * size, but it happens at most once per transaction.
2099 */
2100 item = string_list_insert(affected_refnames, "HEAD");
2101 if (item->util) {
2102 /* An entry already existed */
2103 strbuf_addf(err,
2104 "multiple updates for 'HEAD' (including one "
2105 "via its referent '%s') are not allowed",
2106 update->refname);
2107 return TRANSACTION_NAME_CONFLICT;
2108 }
2109
2110 new_update = ref_transaction_add_update(
2111 transaction, "HEAD",
2112 update->flags | REF_LOG_ONLY | REF_NODEREF,
2113 update->new_oid.hash, update->old_oid.hash,
2114 update->msg);
2115
2116 item->util = new_update;
2117
2118 return 0;
2119 }
2120
2121 /*
2122 * update is for a symref that points at referent and doesn't have
2123 * REF_NODEREF set. Split it into two updates:
2124 * - The original update, but with REF_LOG_ONLY and REF_NODEREF set
2125 * - A new, separate update for the referent reference
2126 * Note that the new update will itself be subject to splitting when
2127 * the iteration gets to it.
2128 */
2129 static int split_symref_update(struct files_ref_store *refs,
2130 struct ref_update *update,
2131 const char *referent,
2132 struct ref_transaction *transaction,
2133 struct string_list *affected_refnames,
2134 struct strbuf *err)
2135 {
2136 struct string_list_item *item;
2137 struct ref_update *new_update;
2138 unsigned int new_flags;
2139
2140 /*
2141 * First make sure that referent is not already in the
2142 * transaction. This insertion is O(N) in the transaction
2143 * size, but it happens at most once per symref in a
2144 * transaction.
2145 */
2146 item = string_list_insert(affected_refnames, referent);
2147 if (item->util) {
2148 /* An entry already existed */
2149 strbuf_addf(err,
2150 "multiple updates for '%s' (including one "
2151 "via symref '%s') are not allowed",
2152 referent, update->refname);
2153 return TRANSACTION_NAME_CONFLICT;
2154 }
2155
2156 new_flags = update->flags;
2157 if (!strcmp(update->refname, "HEAD")) {
2158 /*
2159 * Record that the new update came via HEAD, so that
2160 * when we process it, split_head_update() doesn't try
2161 * to add another reflog update for HEAD. Note that
2162 * this bit will be propagated if the new_update
2163 * itself needs to be split.
2164 */
2165 new_flags |= REF_UPDATE_VIA_HEAD;
2166 }
2167
2168 new_update = ref_transaction_add_update(
2169 transaction, referent, new_flags,
2170 update->new_oid.hash, update->old_oid.hash,
2171 update->msg);
2172
2173 new_update->parent_update = update;
2174
2175 /*
2176 * Change the symbolic ref update to log only. Also, it
2177 * doesn't need to check its old SHA-1 value, as that will be
2178 * done when new_update is processed.
2179 */
2180 update->flags |= REF_LOG_ONLY | REF_NODEREF;
2181 update->flags &= ~REF_HAVE_OLD;
2182
2183 item->util = new_update;
2184
2185 return 0;
2186 }
2187
2188 /*
2189 * Return the refname under which update was originally requested.
2190 */
2191 static const char *original_update_refname(struct ref_update *update)
2192 {
2193 while (update->parent_update)
2194 update = update->parent_update;
2195
2196 return update->refname;
2197 }
2198
2199 /*
2200 * Check whether the REF_HAVE_OLD and old_oid values stored in update
2201 * are consistent with oid, which is the reference's current value. If
2202 * everything is OK, return 0; otherwise, write an error message to
2203 * err and return -1.
2204 */
2205 static int check_old_oid(struct ref_update *update, struct object_id *oid,
2206 struct strbuf *err)
2207 {
2208 if (!(update->flags & REF_HAVE_OLD) ||
2209 !oidcmp(oid, &update->old_oid))
2210 return 0;
2211
2212 if (is_null_oid(&update->old_oid))
2213 strbuf_addf(err, "cannot lock ref '%s': "
2214 "reference already exists",
2215 original_update_refname(update));
2216 else if (is_null_oid(oid))
2217 strbuf_addf(err, "cannot lock ref '%s': "
2218 "reference is missing but expected %s",
2219 original_update_refname(update),
2220 oid_to_hex(&update->old_oid));
2221 else
2222 strbuf_addf(err, "cannot lock ref '%s': "
2223 "is at %s but expected %s",
2224 original_update_refname(update),
2225 oid_to_hex(oid),
2226 oid_to_hex(&update->old_oid));
2227
2228 return -1;
2229 }
2230
2231 /*
2232 * Prepare for carrying out update:
2233 * - Lock the reference referred to by update.
2234 * - Read the reference under lock.
2235 * - Check that its old SHA-1 value (if specified) is correct, and in
2236 * any case record it in update->lock->old_oid for later use when
2237 * writing the reflog.
2238 * - If it is a symref update without REF_NODEREF, split it up into a
2239 * REF_LOG_ONLY update of the symref and add a separate update for
2240 * the referent to transaction.
2241 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2242 * update of HEAD.
2243 */
2244 static int lock_ref_for_update(struct files_ref_store *refs,
2245 struct ref_update *update,
2246 struct ref_transaction *transaction,
2247 const char *head_ref,
2248 struct string_list *affected_refnames,
2249 struct strbuf *err)
2250 {
2251 struct strbuf referent = STRBUF_INIT;
2252 int mustexist = (update->flags & REF_HAVE_OLD) &&
2253 !is_null_oid(&update->old_oid);
2254 int ret;
2255 struct ref_lock *lock;
2256
2257 files_assert_main_repository(refs, "lock_ref_for_update");
2258
2259 if ((update->flags & REF_HAVE_NEW) && is_null_oid(&update->new_oid))
2260 update->flags |= REF_DELETING;
2261
2262 if (head_ref) {
2263 ret = split_head_update(update, transaction, head_ref,
2264 affected_refnames, err);
2265 if (ret)
2266 return ret;
2267 }
2268
2269 ret = lock_raw_ref(refs, update->refname, mustexist,
2270 affected_refnames, NULL,
2271 &lock, &referent,
2272 &update->type, err);
2273 if (ret) {
2274 char *reason;
2275
2276 reason = strbuf_detach(err, NULL);
2277 strbuf_addf(err, "cannot lock ref '%s': %s",
2278 original_update_refname(update), reason);
2279 free(reason);
2280 return ret;
2281 }
2282
2283 update->backend_data = lock;
2284
2285 if (update->type & REF_ISSYMREF) {
2286 if (update->flags & REF_NODEREF) {
2287 /*
2288 * We won't be reading the referent as part of
2289 * the transaction, so we have to read it here
2290 * to record and possibly check old_sha1:
2291 */
2292 if (refs_read_ref_full(&refs->base,
2293 referent.buf, 0,
2294 lock->old_oid.hash, NULL)) {
2295 if (update->flags & REF_HAVE_OLD) {
2296 strbuf_addf(err, "cannot lock ref '%s': "
2297 "error reading reference",
2298 original_update_refname(update));
2299 return -1;
2300 }
2301 } else if (check_old_oid(update, &lock->old_oid, err)) {
2302 return TRANSACTION_GENERIC_ERROR;
2303 }
2304 } else {
2305 /*
2306 * Create a new update for the reference this
2307 * symref is pointing at. Also, we will record
2308 * and verify old_sha1 for this update as part
2309 * of processing the split-off update, so we
2310 * don't have to do it here.
2311 */
2312 ret = split_symref_update(refs, update,
2313 referent.buf, transaction,
2314 affected_refnames, err);
2315 if (ret)
2316 return ret;
2317 }
2318 } else {
2319 struct ref_update *parent_update;
2320
2321 if (check_old_oid(update, &lock->old_oid, err))
2322 return TRANSACTION_GENERIC_ERROR;
2323
2324 /*
2325 * If this update is happening indirectly because of a
2326 * symref update, record the old SHA-1 in the parent
2327 * update:
2328 */
2329 for (parent_update = update->parent_update;
2330 parent_update;
2331 parent_update = parent_update->parent_update) {
2332 struct ref_lock *parent_lock = parent_update->backend_data;
2333 oidcpy(&parent_lock->old_oid, &lock->old_oid);
2334 }
2335 }
2336
2337 if ((update->flags & REF_HAVE_NEW) &&
2338 !(update->flags & REF_DELETING) &&
2339 !(update->flags & REF_LOG_ONLY)) {
2340 if (!(update->type & REF_ISSYMREF) &&
2341 !oidcmp(&lock->old_oid, &update->new_oid)) {
2342 /*
2343 * The reference already has the desired
2344 * value, so we don't need to write it.
2345 */
2346 } else if (write_ref_to_lockfile(lock, &update->new_oid,
2347 err)) {
2348 char *write_err = strbuf_detach(err, NULL);
2349
2350 /*
2351 * The lock was freed upon failure of
2352 * write_ref_to_lockfile():
2353 */
2354 update->backend_data = NULL;
2355 strbuf_addf(err,
2356 "cannot update ref '%s': %s",
2357 update->refname, write_err);
2358 free(write_err);
2359 return TRANSACTION_GENERIC_ERROR;
2360 } else {
2361 update->flags |= REF_NEEDS_COMMIT;
2362 }
2363 }
2364 if (!(update->flags & REF_NEEDS_COMMIT)) {
2365 /*
2366 * We didn't call write_ref_to_lockfile(), so
2367 * the lockfile is still open. Close it to
2368 * free up the file descriptor:
2369 */
2370 if (close_ref(lock)) {
2371 strbuf_addf(err, "couldn't close '%s.lock'",
2372 update->refname);
2373 return TRANSACTION_GENERIC_ERROR;
2374 }
2375 }
2376 return 0;
2377 }
2378
2379 /*
2380 * Unlock any references in `transaction` that are still locked, and
2381 * mark the transaction closed.
2382 */
2383 static void files_transaction_cleanup(struct ref_transaction *transaction)
2384 {
2385 size_t i;
2386
2387 for (i = 0; i < transaction->nr; i++) {
2388 struct ref_update *update = transaction->updates[i];
2389 struct ref_lock *lock = update->backend_data;
2390
2391 if (lock) {
2392 unlock_ref(lock);
2393 update->backend_data = NULL;
2394 }
2395 }
2396
2397 transaction->state = REF_TRANSACTION_CLOSED;
2398 }
2399
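/*
 * The "prepare" phase of a files-backend transaction: reject
 * duplicate refnames, decide whether an extra REF_LOG_ONLY update of
 * HEAD is needed, then lock each reference and write its new value to
 * a lockfile, ready to be activated by files_transaction_finish().
 *
 * Callers normally reach this through the generic transaction API in
 * refs.h rather than invoking the vtable method directly; a rough
 * sketch of that usage (error handling omitted; old_sha1 and new_sha1
 * stand in for the caller's object names):
 *
 *   struct strbuf err = STRBUF_INIT;
 *   struct ref_transaction *t = ref_transaction_begin(&err);
 *
 *   ref_transaction_update(t, "refs/heads/topic",
 *                          new_sha1, old_sha1, 0, "example: update", &err);
 *   ref_transaction_commit(t, &err);   (runs prepare, then finish)
 *   ref_transaction_free(t);
 *   strbuf_release(&err);
 */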
2400 static int files_transaction_prepare(struct ref_store *ref_store,
2401 struct ref_transaction *transaction,
2402 struct strbuf *err)
2403 {
2404 struct files_ref_store *refs =
2405 files_downcast(ref_store, REF_STORE_WRITE,
2406 "ref_transaction_prepare");
2407 size_t i;
2408 int ret = 0;
2409 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2410 char *head_ref = NULL;
2411 int head_type;
2412 struct object_id head_oid;
2413
2414 assert(err);
2415
2416 if (!transaction->nr)
2417 goto cleanup;
2418
2419 /*
2420 * Fail if a refname appears more than once in the
2421 * transaction. (If we end up splitting up any updates using
2422 * split_symref_update() or split_head_update(), those
2423 * functions will check that the new updates don't have the
2424 * same refname as any existing ones.)
2425 */
2426 for (i = 0; i < transaction->nr; i++) {
2427 struct ref_update *update = transaction->updates[i];
2428 struct string_list_item *item =
2429 string_list_append(&affected_refnames, update->refname);
2430
2431 /*
2432 * We store a pointer to update in item->util, but at
2433 * the moment we never use the value of this field
2434 * except to check whether it is non-NULL.
2435 */
2436 item->util = update;
2437 }
2438 string_list_sort(&affected_refnames);
2439 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2440 ret = TRANSACTION_GENERIC_ERROR;
2441 goto cleanup;
2442 }
2443
2444 /*
2445 * Special hack: if a branch is updated directly and HEAD
2446 * points to it (as may happen on the remote side of a push,
2447 * for example), then logically the HEAD reflog should be
2448 * updated too.
2449 *
2450 * A generic solution would require reverse symref lookups,
2451 * but finding all symrefs that point at a given branch would
2452 * be too costly to be worth it for this rare event (the
2453 * direct update of a branch). So let's cheat and check HEAD
2454 * only, which should cover 99% of all usage scenarios (even
2455 * 100% of the default ones).
2456 *
2457 * So if HEAD is a symbolic reference, then record the name of
2458 * the reference that it points to. If we see an update of
2459 * head_ref within the transaction, then split_head_update()
2460 * arranges for the reflog of HEAD to be updated, too.
2461 */
2462 head_ref = refs_resolve_refdup(ref_store, "HEAD",
2463 RESOLVE_REF_NO_RECURSE,
2464 head_oid.hash, &head_type);
2465
2466 if (head_ref && !(head_type & REF_ISSYMREF)) {
2467 free(head_ref);
2468 head_ref = NULL;
2469 }
2470
2471 /*
2472 * Acquire all locks, verify old values if provided, check
2473 * that new values are valid, and write new values to the
2474 * lockfiles, ready to be activated. Only keep one lockfile
2475 * open at a time to avoid running out of file descriptors.
2476 * Note that lock_ref_for_update() might append more updates
2477 * to the transaction.
2478 */
2479 for (i = 0; i < transaction->nr; i++) {
2480 struct ref_update *update = transaction->updates[i];
2481
2482 ret = lock_ref_for_update(refs, update, transaction,
2483 head_ref, &affected_refnames, err);
2484 if (ret)
2485 break;
2486 }
2487
2488 cleanup:
2489 free(head_ref);
2490 string_list_clear(&affected_refnames, 0);
2491
2492 if (ret)
2493 files_transaction_cleanup(transaction);
2494 else
2495 transaction->state = REF_TRANSACTION_PREPARED;
2496
2497 return ret;
2498 }
2499
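/*
 * The "finish" phase: for each prepared update, write its reflog
 * entry and activate its lockfile; then remove the loose and packed
 * versions of any references being deleted, delete their reflogs, and
 * release whatever locks remain.
 */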
2500 static int files_transaction_finish(struct ref_store *ref_store,
2501 struct ref_transaction *transaction,
2502 struct strbuf *err)
2503 {
2504 struct files_ref_store *refs =
2505 files_downcast(ref_store, 0, "ref_transaction_finish");
2506 size_t i;
2507 int ret = 0;
2508 struct string_list refs_to_delete = STRING_LIST_INIT_NODUP;
2509 struct string_list_item *ref_to_delete;
2510 struct strbuf sb = STRBUF_INIT;
2511
2512 assert(err);
2513
2514 if (!transaction->nr) {
2515 transaction->state = REF_TRANSACTION_CLOSED;
2516 return 0;
2517 }
2518
2519 /* Perform updates first so live commits remain referenced */
2520 for (i = 0; i < transaction->nr; i++) {
2521 struct ref_update *update = transaction->updates[i];
2522 struct ref_lock *lock = update->backend_data;
2523
2524 if (update->flags & REF_NEEDS_COMMIT ||
2525 update->flags & REF_LOG_ONLY) {
2526 if (files_log_ref_write(refs,
2527 lock->ref_name,
2528 &lock->old_oid,
2529 &update->new_oid,
2530 update->msg, update->flags,
2531 err)) {
2532 char *old_msg = strbuf_detach(err, NULL);
2533
2534 strbuf_addf(err, "cannot update the ref '%s': %s",
2535 lock->ref_name, old_msg);
2536 free(old_msg);
2537 unlock_ref(lock);
2538 update->backend_data = NULL;
2539 ret = TRANSACTION_GENERIC_ERROR;
2540 goto cleanup;
2541 }
2542 }
2543 if (update->flags & REF_NEEDS_COMMIT) {
2544 clear_loose_ref_cache(refs);
2545 if (commit_ref(lock)) {
2546 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
2547 unlock_ref(lock);
2548 update->backend_data = NULL;
2549 ret = TRANSACTION_GENERIC_ERROR;
2550 goto cleanup;
2551 }
2552 }
2553 }
2554 /* Perform deletes now that updates are safely completed */
2555 for (i = 0; i < transaction->nr; i++) {
2556 struct ref_update *update = transaction->updates[i];
2557 struct ref_lock *lock = update->backend_data;
2558
2559 if (update->flags & REF_DELETING &&
2560 !(update->flags & REF_LOG_ONLY)) {
2561 if (!(update->type & REF_ISPACKED) ||
2562 update->type & REF_ISSYMREF) {
2563 /* It is a loose reference. */
2564 strbuf_reset(&sb);
2565 files_ref_path(refs, &sb, lock->ref_name);
2566 if (unlink_or_msg(sb.buf, err)) {
2567 ret = TRANSACTION_GENERIC_ERROR;
2568 goto cleanup;
2569 }
2570 update->flags |= REF_DELETED_LOOSE;
2571 }
2572
2573 if (!(update->flags & REF_ISPRUNING))
2574 string_list_append(&refs_to_delete,
2575 lock->ref_name);
2576 }
2577 }
2578
2579 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2580 ret = TRANSACTION_GENERIC_ERROR;
2581 goto cleanup;
2582 }
2583
2584 if (repack_without_refs(refs->packed_ref_store, &refs_to_delete, err)) {
2585 ret = TRANSACTION_GENERIC_ERROR;
2586 packed_refs_unlock(refs->packed_ref_store);
2587 goto cleanup;
2588 }
2589
2590 packed_refs_unlock(refs->packed_ref_store);
2591
2592 /* Delete the reflogs of any references that were deleted: */
2593 for_each_string_list_item(ref_to_delete, &refs_to_delete) {
2594 strbuf_reset(&sb);
2595 files_reflog_path(refs, &sb, ref_to_delete->string);
2596 if (!unlink_or_warn(sb.buf))
2597 try_remove_empty_parents(refs, ref_to_delete->string,
2598 REMOVE_EMPTY_PARENTS_REFLOG);
2599 }
2600
2601 clear_loose_ref_cache(refs);
2602
2603 cleanup:
2604 files_transaction_cleanup(transaction);
2605
2606 for (i = 0; i < transaction->nr; i++) {
2607 struct ref_update *update = transaction->updates[i];
2608
2609 if (update->flags & REF_DELETED_LOOSE) {
2610 /*
2611 * The loose reference was deleted. Delete any
2612 * empty parent directories. (Note that this
2613 * can only work because we have already
2614 * removed the lockfile.)
2615 */
2616 try_remove_empty_parents(refs, update->refname,
2617 REMOVE_EMPTY_PARENTS_REF);
2618 }
2619 }
2620
2621 strbuf_release(&sb);
2622 string_list_clear(&refs_to_delete, 0);
2623 return ret;
2624 }
2625
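/* Abort a transaction: release any locks still held and mark it closed. */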
2626 static int files_transaction_abort(struct ref_store *ref_store,
2627 struct ref_transaction *transaction,
2628 struct strbuf *err)
2629 {
2630 files_transaction_cleanup(transaction);
2631 return 0;
2632 }
2633
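/*
 * A refs_for_each_rawref() callback: report whether refname appears
 * in the string list passed via cb_data. Used below to detect
 * pre-existing references before an "initial" transaction commit.
 */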
2634 static int ref_present(const char *refname,
2635 const struct object_id *oid, int flags, void *cb_data)
2636 {
2637 struct string_list *affected_refnames = cb_data;
2638
2639 return string_list_has_string(affected_refnames, refname);
2640 }
2641
2642 static int files_initial_transaction_commit(struct ref_store *ref_store,
2643 struct ref_transaction *transaction,
2644 struct strbuf *err)
2645 {
2646 struct files_ref_store *refs =
2647 files_downcast(ref_store, REF_STORE_WRITE,
2648 "initial_ref_transaction_commit");
2649 size_t i;
2650 int ret = 0;
2651 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2652
2653 assert(err);
2654
2655 if (transaction->state != REF_TRANSACTION_OPEN)
2656 die("BUG: commit called for transaction that is not open");
2657
2658 /* Fail if a refname appears more than once in the transaction: */
2659 for (i = 0; i < transaction->nr; i++)
2660 string_list_append(&affected_refnames,
2661 transaction->updates[i]->refname);
2662 string_list_sort(&affected_refnames);
2663 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2664 ret = TRANSACTION_GENERIC_ERROR;
2665 goto cleanup;
2666 }
2667
2668 /*
2669 * It's really undefined to call this function in an active
2670 * repository or when there are existing references: we are
2671 * only locking and changing packed-refs, so (1) any
2672 * simultaneous processes might try to change a reference at
2673 * the same time we do, and (2) any existing loose versions of
2674 * the references that we are setting would have precedence
2675 * over our values. But some remote helpers create the remote
2676 * "HEAD" and "master" branches before calling this function,
2677 * so here we really only check that none of the references
2678 * that we are creating already exists.
2679 */
2680 if (refs_for_each_rawref(&refs->base, ref_present,
2681 &affected_refnames))
2682 die("BUG: initial ref transaction called with existing refs");
2683
2684 for (i = 0; i < transaction->nr; i++) {
2685 struct ref_update *update = transaction->updates[i];
2686
2687 if ((update->flags & REF_HAVE_OLD) &&
2688 !is_null_oid(&update->old_oid))
2689 die("BUG: initial ref transaction with old_sha1 set");
2690 if (refs_verify_refname_available(&refs->base, update->refname,
2691 &affected_refnames, NULL,
2692 err)) {
2693 ret = TRANSACTION_NAME_CONFLICT;
2694 goto cleanup;
2695 }
2696 }
2697
2698 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2699 ret = TRANSACTION_GENERIC_ERROR;
2700 goto cleanup;
2701 }
2702
2703 for (i = 0; i < transaction->nr; i++) {
2704 struct ref_update *update = transaction->updates[i];
2705
2706 if ((update->flags & REF_HAVE_NEW) &&
2707 !is_null_oid(&update->new_oid))
2708 add_packed_ref(refs->packed_ref_store, update->refname,
2709 &update->new_oid);
2710 }
2711
2712 if (commit_packed_refs(refs->packed_ref_store, err)) {
2713 ret = TRANSACTION_GENERIC_ERROR;
2714 goto cleanup;
2715 }
2716
2717 cleanup:
2718 packed_refs_unlock(refs->packed_ref_store);
2719 transaction->state = REF_TRANSACTION_CLOSED;
2720 string_list_clear(&affected_refnames, 0);
2721 return ret;
2722 }
2723
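/*
 * State shared between files_reflog_expire() and its per-entry
 * callback, expire_reflog_ent().
 */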
2724 struct expire_reflog_cb {
2725 unsigned int flags;
2726 reflog_expiry_should_prune_fn *should_prune_fn;
2727 void *policy_cb;
2728 FILE *newlog;
2729 struct object_id last_kept_oid;
2730 };
2731
2732 static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
2733 const char *email, timestamp_t timestamp, int tz,
2734 const char *message, void *cb_data)
2735 {
2736 struct expire_reflog_cb *cb = cb_data;
2737 struct expire_reflog_policy_cb *policy_cb = cb->policy_cb;
2738
2739 if (cb->flags & EXPIRE_REFLOGS_REWRITE)
2740 ooid = &cb->last_kept_oid;
2741
2742 if ((*cb->should_prune_fn)(ooid, noid, email, timestamp, tz,
2743 message, policy_cb)) {
2744 if (!cb->newlog)
2745 printf("would prune %s", message);
2746 else if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
2747 printf("prune %s", message);
2748 } else {
2749 if (cb->newlog) {
2750 fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s",
2751 oid_to_hex(ooid), oid_to_hex(noid),
2752 email, timestamp, tz, message);
2753 oidcpy(&cb->last_kept_oid, noid);
2754 }
2755 if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
2756 printf("keep %s", message);
2757 }
2758 return 0;
2759 }
2760
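/*
 * Expire entries from refname's reflog, asking the caller-supplied
 * policy callbacks which entries to keep. Unless EXPIRE_REFLOGS_DRY_RUN
 * is set, the surviving entries are written to a new log file held
 * under a lockfile and then committed; with EXPIRE_REFLOGS_UPDATE_REF
 * the reference itself is also rewritten to the last kept value.
 */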
2761 static int files_reflog_expire(struct ref_store *ref_store,
2762 const char *refname, const unsigned char *sha1,
2763 unsigned int flags,
2764 reflog_expiry_prepare_fn prepare_fn,
2765 reflog_expiry_should_prune_fn should_prune_fn,
2766 reflog_expiry_cleanup_fn cleanup_fn,
2767 void *policy_cb_data)
2768 {
2769 struct files_ref_store *refs =
2770 files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2771 static struct lock_file reflog_lock;
2772 struct expire_reflog_cb cb;
2773 struct ref_lock *lock;
2774 struct strbuf log_file_sb = STRBUF_INIT;
2775 char *log_file;
2776 int status = 0;
2777 int type;
2778 struct strbuf err = STRBUF_INIT;
2779 struct object_id oid;
2780
2781 memset(&cb, 0, sizeof(cb));
2782 cb.flags = flags;
2783 cb.policy_cb = policy_cb_data;
2784 cb.should_prune_fn = should_prune_fn;
2785
2786 /*
2787 * The reflog file is locked by holding the lock on the
2788 * reference itself, plus we might need to update the
2789 * reference if --updateref was specified:
2790 */
2791 lock = lock_ref_sha1_basic(refs, refname, sha1,
2792 NULL, NULL, REF_NODEREF,
2793 &type, &err);
2794 if (!lock) {
2795 error("cannot lock ref '%s': %s", refname, err.buf);
2796 strbuf_release(&err);
2797 return -1;
2798 }
2799 if (!refs_reflog_exists(ref_store, refname)) {
2800 unlock_ref(lock);
2801 return 0;
2802 }
2803
2804 files_reflog_path(refs, &log_file_sb, refname);
2805 log_file = strbuf_detach(&log_file_sb, NULL);
2806 if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
2807 /*
2808 * Even though holding $GIT_DIR/logs/$reflog.lock has
2809 * no locking implications, we use the lock_file
2810 * machinery here anyway because it does a lot of the
2811 * work we need, including cleaning up if the program
2812 * exits unexpectedly.
2813 */
2814 if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) {
2815 struct strbuf err = STRBUF_INIT;
2816 unable_to_lock_message(log_file, errno, &err);
2817 error("%s", err.buf);
2818 strbuf_release(&err);
2819 goto failure;
2820 }
2821 cb.newlog = fdopen_lock_file(&reflog_lock, "w");
2822 if (!cb.newlog) {
2823 error("cannot fdopen %s (%s)",
2824 get_lock_file_path(&reflog_lock), strerror(errno));
2825 goto failure;
2826 }
2827 }
2828
2829 hashcpy(oid.hash, sha1);
2830
2831 (*prepare_fn)(refname, &oid, cb.policy_cb);
2832 refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
2833 (*cleanup_fn)(cb.policy_cb);
2834
2835 if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
2836 /*
2837 * It doesn't make sense to adjust a reference pointed
2838 * to by a symbolic ref based on expiring entries in
2839 * the symbolic reference's reflog. Nor can we update
2840 * a reference if there are no remaining reflog
2841 * entries.
2842 */
2843 int update = (flags & EXPIRE_REFLOGS_UPDATE_REF) &&
2844 !(type & REF_ISSYMREF) &&
2845 !is_null_oid(&cb.last_kept_oid);
2846
2847 if (close_lock_file(&reflog_lock)) {
2848 status |= error("couldn't write %s: %s", log_file,
2849 strerror(errno));
2850 } else if (update &&
2851 (write_in_full(get_lock_file_fd(lock->lk),
2852 oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
2853 write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 ||
2854 close_ref(lock) < 0)) {
2855 status |= error("couldn't write %s",
2856 get_lock_file_path(lock->lk));
2857 rollback_lock_file(&reflog_lock);
2858 } else if (commit_lock_file(&reflog_lock)) {
2859 status |= error("unable to write reflog '%s' (%s)",
2860 log_file, strerror(errno));
2861 } else if (update && commit_ref(lock)) {
2862 status |= error("couldn't set %s", lock->ref_name);
2863 }
2864 }
2865 free(log_file);
2866 unlock_ref(lock);
2867 return status;
2868
2869 failure:
2870 rollback_lock_file(&reflog_lock);
2871 free(log_file);
2872 unlock_ref(lock);
2873 return -1;
2874 }
2875
2876 static int files_init_db(struct ref_store *ref_store, struct strbuf *err)
2877 {
2878 struct files_ref_store *refs =
2879 files_downcast(ref_store, REF_STORE_WRITE, "init_db");
2880 struct strbuf sb = STRBUF_INIT;
2881
2882 /*
2883 * Create .git/refs/{heads,tags}
2884 */
2885 files_ref_path(refs, &sb, "refs/heads");
2886 safe_create_dir(sb.buf, 1);
2887
2888 strbuf_reset(&sb);
2889 files_ref_path(refs, &sb, "refs/tags");
2890 safe_create_dir(sb.buf, 1);
2891
2892 strbuf_release(&sb);
2893 return 0;
2894 }
2895
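/*
 * The vtable that registers the "files" backend with the generic refs
 * machinery; the entries correspond to the ref_storage_be methods
 * declared in refs-internal.h.
 */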
2896 struct ref_storage_be refs_be_files = {
2897 NULL,
2898 "files",
2899 files_ref_store_create,
2900 files_init_db,
2901 files_transaction_prepare,
2902 files_transaction_finish,
2903 files_transaction_abort,
2904 files_initial_transaction_commit,
2905
2906 files_pack_refs,
2907 files_peel_ref,
2908 files_create_symref,
2909 files_delete_refs,
2910 files_rename_ref,
2911
2912 files_ref_iterator_begin,
2913 files_read_raw_ref,
2914
2915 files_reflog_iterator_begin,
2916 files_for_each_reflog_ent,
2917 files_for_each_reflog_ent_reverse,
2918 files_reflog_exists,
2919 files_create_reflog,
2920 files_delete_reflog,
2921 files_reflog_expire
2922 };