1 #include "../cache.h"
2 #include "../config.h"
3 #include "../refs.h"
4 #include "refs-internal.h"
5 #include "ref-cache.h"
6 #include "packed-backend.h"
7 #include "../iterator.h"
8 #include "../dir-iterator.h"
9 #include "../lockfile.h"
10 #include "../object.h"
11 #include "../dir.h"
12
13 struct ref_lock {
14 char *ref_name;
15 struct lock_file *lk;
16 struct object_id old_oid;
17 };
18
19 /*
20 * Future: this needs to be in "struct repository"
21 * when doing a full libification.
22 */
23 struct files_ref_store {
24 struct ref_store base;
25 unsigned int store_flags;
26
27 char *gitdir;
28 char *gitcommondir;
29
30 struct ref_cache *loose;
31
32 struct ref_store *packed_ref_store;
33 };
34
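/* Discard the loose ref cache so that it will be rebuilt on the next access. */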
35 static void clear_loose_ref_cache(struct files_ref_store *refs)
36 {
37 if (refs->loose) {
38 free_ref_cache(refs->loose);
39 refs->loose = NULL;
40 }
41 }
42
43 /*
44 * Create a new files-based ref store for the repository whose
45 * gitdir is given.
46 */
47 static struct ref_store *files_ref_store_create(const char *gitdir,
48 unsigned int flags)
49 {
50 struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
51 struct ref_store *ref_store = (struct ref_store *)refs;
52 struct strbuf sb = STRBUF_INIT;
53
54 base_ref_store_init(ref_store, &refs_be_files);
55 refs->store_flags = flags;
56
57 refs->gitdir = xstrdup(gitdir);
58 get_common_dir_noenv(&sb, gitdir);
59 refs->gitcommondir = strbuf_detach(&sb, NULL);
60 strbuf_addf(&sb, "%s/packed-refs", refs->gitcommondir);
61 refs->packed_ref_store = packed_ref_store_create(sb.buf, flags);
62 strbuf_release(&sb);
63
64 return ref_store;
65 }
66
67 /*
68 * Die if refs is not the main ref store. "caller" is used in any
69 * necessary error messages.
70 */
71 static void files_assert_main_repository(struct files_ref_store *refs,
72 const char *caller)
73 {
74 if (refs->store_flags & REF_STORE_MAIN)
75 return;
76
77 die("BUG: operation %s only allowed for main ref store", caller);
78 }
79
80 /*
81 * Downcast ref_store to files_ref_store. Die if ref_store is not a
82 * files_ref_store. required_flags is compared with ref_store's
83 * store_flags to ensure the ref_store has all required capabilities.
84 * "caller" is used in any necessary error messages.
85 */
86 static struct files_ref_store *files_downcast(struct ref_store *ref_store,
87 unsigned int required_flags,
88 const char *caller)
89 {
90 struct files_ref_store *refs;
91
92 if (ref_store->be != &refs_be_files)
93 die("BUG: ref_store is type \"%s\" not \"files\" in %s",
94 ref_store->be->name, caller);
95
96 refs = (struct files_ref_store *)ref_store;
97
98 if ((refs->store_flags & required_flags) != required_flags)
99 die("BUG: operation %s requires abilities 0x%x, but only have 0x%x",
100 caller, required_flags, refs->store_flags);
101
102 return refs;
103 }
104
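/*
 * Fill in sb with the path to the reflog for refname (or to the logs
 * directory itself if refname is NULL), choosing between the worktree's
 * gitdir and the common dir based on the ref type.
 */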
105 static void files_reflog_path(struct files_ref_store *refs,
106 struct strbuf *sb,
107 const char *refname)
108 {
109 if (!refname) {
110 /*
111 * FIXME: of course this is wrong in a multi-worktree
112 * setting. To be fixed real soon.
113 */
114 strbuf_addf(sb, "%s/logs", refs->gitcommondir);
115 return;
116 }
117
118 switch (ref_type(refname)) {
119 case REF_TYPE_PER_WORKTREE:
120 case REF_TYPE_PSEUDOREF:
121 strbuf_addf(sb, "%s/logs/%s", refs->gitdir, refname);
122 break;
123 case REF_TYPE_NORMAL:
124 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname);
125 break;
126 default:
127 die("BUG: unknown ref type %d of ref %s",
128 ref_type(refname), refname);
129 }
130 }
131
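/*
 * Fill in sb with the path to the loose ref file for refname: per-worktree
 * refs and pseudorefs live under the worktree's gitdir, everything else
 * under the common dir.
 */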
132 static void files_ref_path(struct files_ref_store *refs,
133 struct strbuf *sb,
134 const char *refname)
135 {
136 switch (ref_type(refname)) {
137 case REF_TYPE_PER_WORKTREE:
138 case REF_TYPE_PSEUDOREF:
139 strbuf_addf(sb, "%s/%s", refs->gitdir, refname);
140 break;
141 case REF_TYPE_NORMAL:
142 strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname);
143 break;
144 default:
145 die("BUG: unknown ref type %d of ref %s",
146 ref_type(refname), refname);
147 }
148 }
149
150 /*
151 * Read the loose references from the namespace dirname into dir
152 * (without recursing). dirname must end with '/'. dir must be the
153 * directory entry corresponding to dirname.
154 */
155 static void loose_fill_ref_dir(struct ref_store *ref_store,
156 struct ref_dir *dir, const char *dirname)
157 {
158 struct files_ref_store *refs =
159 files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir");
160 DIR *d;
161 struct dirent *de;
162 int dirnamelen = strlen(dirname);
163 struct strbuf refname;
164 struct strbuf path = STRBUF_INIT;
165 size_t path_baselen;
166
167 files_ref_path(refs, &path, dirname);
168 path_baselen = path.len;
169
170 d = opendir(path.buf);
171 if (!d) {
172 strbuf_release(&path);
173 return;
174 }
175
176 strbuf_init(&refname, dirnamelen + 257);
177 strbuf_add(&refname, dirname, dirnamelen);
178
179 while ((de = readdir(d)) != NULL) {
180 struct object_id oid;
181 struct stat st;
182 int flag;
183
184 if (de->d_name[0] == '.')
185 continue;
186 if (ends_with(de->d_name, ".lock"))
187 continue;
188 strbuf_addstr(&refname, de->d_name);
189 strbuf_addstr(&path, de->d_name);
190 if (stat(path.buf, &st) < 0) {
191 ; /* silently ignore */
192 } else if (S_ISDIR(st.st_mode)) {
193 strbuf_addch(&refname, '/');
194 add_entry_to_dir(dir,
195 create_dir_entry(dir->cache, refname.buf,
196 refname.len, 1));
197 } else {
198 if (!refs_resolve_ref_unsafe(&refs->base,
199 refname.buf,
200 RESOLVE_REF_READING,
201 oid.hash, &flag)) {
202 oidclr(&oid);
203 flag |= REF_ISBROKEN;
204 } else if (is_null_oid(&oid)) {
205 /*
206 * It is so astronomically unlikely
207 * that NULL_SHA1 is the SHA-1 of an
208 * actual object that we consider its
209 * appearance in a loose reference
210 * file to be repo corruption
211 * (probably due to a software bug).
212 */
213 flag |= REF_ISBROKEN;
214 }
215
216 if (check_refname_format(refname.buf,
217 REFNAME_ALLOW_ONELEVEL)) {
218 if (!refname_is_safe(refname.buf))
219 die("loose refname is dangerous: %s", refname.buf);
220 oidclr(&oid);
221 flag |= REF_BAD_NAME | REF_ISBROKEN;
222 }
223 add_entry_to_dir(dir,
224 create_ref_entry(refname.buf, &oid, flag));
225 }
226 strbuf_setlen(&refname, dirnamelen);
227 strbuf_setlen(&path, path_baselen);
228 }
229 strbuf_release(&refname);
230 strbuf_release(&path);
231 closedir(d);
232
233 /*
234 * Manually add refs/bisect, which, being per-worktree, might
235 * not appear in the directory listing for refs/ in the main
236 * repo.
237 */
238 if (!strcmp(dirname, "refs/")) {
239 int pos = search_ref_dir(dir, "refs/bisect/", 12);
240
241 if (pos < 0) {
242 struct ref_entry *child_entry = create_dir_entry(
243 dir->cache, "refs/bisect/", 12, 1);
244 add_entry_to_dir(dir, child_entry);
245 }
246 }
247 }
248
249 static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
250 {
251 if (!refs->loose) {
252 /*
253 * Mark the top-level directory complete because we
254 * are about to read the only subdirectory that can
255 * hold references:
256 */
257 refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir);
258
259 /* We're going to fill the top level ourselves: */
260 refs->loose->root->flag &= ~REF_INCOMPLETE;
261
262 /*
263 * Add an incomplete entry for "refs/" (to be filled
264 * lazily):
265 */
266 add_entry_to_dir(get_ref_dir(refs->loose->root),
267 create_dir_entry(refs->loose, "refs/", 5, 1));
268 }
269 return refs->loose;
270 }
271
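/*
 * Read the raw value of refname without recursing through symrefs: on
 * success fill in sha1 (or referent and REF_ISSYMREF in *type for a
 * symbolic ref) and return 0; on failure return -1 with errno set.
 */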
272 static int files_read_raw_ref(struct ref_store *ref_store,
273 const char *refname, unsigned char *sha1,
274 struct strbuf *referent, unsigned int *type)
275 {
276 struct files_ref_store *refs =
277 files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
278 struct strbuf sb_contents = STRBUF_INIT;
279 struct strbuf sb_path = STRBUF_INIT;
280 const char *path;
281 const char *buf;
282 struct stat st;
283 int fd;
284 int ret = -1;
285 int save_errno;
286 int remaining_retries = 3;
287
288 *type = 0;
289 strbuf_reset(&sb_path);
290
291 files_ref_path(refs, &sb_path, refname);
292
293 path = sb_path.buf;
294
295 stat_ref:
296 /*
297 * We might have to loop back here to avoid a race
298 * condition: first we lstat() the file, then we try
299 * to read it as a link or as a file. But if somebody
300 * changes the type of the file (file <-> directory
301 * <-> symlink) between the lstat() and reading, then
302 * we don't want to report that as an error but rather
303 * try again starting with the lstat().
304 *
305 * We'll keep a count of the retries, though, just to avoid
306 * any confusing situation sending us into an infinite loop.
307 */
308
309 if (remaining_retries-- <= 0)
310 goto out;
311
312 if (lstat(path, &st) < 0) {
313 if (errno != ENOENT)
314 goto out;
315 if (refs_read_raw_ref(refs->packed_ref_store, refname,
316 sha1, referent, type)) {
317 errno = ENOENT;
318 goto out;
319 }
320 ret = 0;
321 goto out;
322 }
323
324 /* Follow "normalized" - ie "refs/.." symlinks by hand */
325 if (S_ISLNK(st.st_mode)) {
326 strbuf_reset(&sb_contents);
327 if (strbuf_readlink(&sb_contents, path, 0) < 0) {
328 if (errno == ENOENT || errno == EINVAL)
329 /* inconsistent with lstat; retry */
330 goto stat_ref;
331 else
332 goto out;
333 }
334 if (starts_with(sb_contents.buf, "refs/") &&
335 !check_refname_format(sb_contents.buf, 0)) {
336 strbuf_swap(&sb_contents, referent);
337 *type |= REF_ISSYMREF;
338 ret = 0;
339 goto out;
340 }
341 /*
342 * It doesn't look like a refname; fall through to just
343 * treating it like a non-symlink, and reading whatever it
344 * points to.
345 */
346 }
347
348 /* Is it a directory? */
349 if (S_ISDIR(st.st_mode)) {
350 /*
351 * Even though there is a directory where the loose
352 * ref is supposed to be, there could still be a
353 * packed ref:
354 */
355 if (refs_read_raw_ref(refs->packed_ref_store, refname,
356 sha1, referent, type)) {
357 errno = EISDIR;
358 goto out;
359 }
360 ret = 0;
361 goto out;
362 }
363
364 /*
365 * Anything else, just open it and try to use it as
366 * a ref
367 */
368 fd = open(path, O_RDONLY);
369 if (fd < 0) {
370 if (errno == ENOENT && !S_ISLNK(st.st_mode))
371 /* inconsistent with lstat; retry */
372 goto stat_ref;
373 else
374 goto out;
375 }
376 strbuf_reset(&sb_contents);
377 if (strbuf_read(&sb_contents, fd, 256) < 0) {
378 int save_errno = errno;
379 close(fd);
380 errno = save_errno;
381 goto out;
382 }
383 close(fd);
384 strbuf_rtrim(&sb_contents);
385 buf = sb_contents.buf;
386 if (starts_with(buf, "ref:")) {
387 buf += 4;
388 while (isspace(*buf))
389 buf++;
390
391 strbuf_reset(referent);
392 strbuf_addstr(referent, buf);
393 *type |= REF_ISSYMREF;
394 ret = 0;
395 goto out;
396 }
397
398 /*
399 * Please note that FETCH_HEAD has additional
400 * data after the sha.
401 */
402 if (get_sha1_hex(buf, sha1) ||
403 (buf[40] != '\0' && !isspace(buf[40]))) {
404 *type |= REF_ISBROKEN;
405 errno = EINVAL;
406 goto out;
407 }
408
409 ret = 0;
410
411 out:
412 save_errno = errno;
413 strbuf_release(&sb_path);
414 strbuf_release(&sb_contents);
415 errno = save_errno;
416 return ret;
417 }
418
419 static void unlock_ref(struct ref_lock *lock)
420 {
421 /* Do not free lock->lk -- atexit() still looks at them */
422 if (lock->lk)
423 rollback_lock_file(lock->lk);
424 free(lock->ref_name);
425 free(lock);
426 }
427
428 /*
429 * Lock refname, without following symrefs, and set *lock_p to point
430 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
431 * and type similarly to read_raw_ref().
432 *
433 * The caller must verify that refname is a "safe" reference name (in
434 * the sense of refname_is_safe()) before calling this function.
435 *
436 * If the reference doesn't already exist, verify that refname doesn't
437 * have a D/F conflict with any existing references. extras and skip
438 * are passed to refs_verify_refname_available() for this check.
439 *
440 * If mustexist is not set and the reference is not found or is
441 * broken, lock the reference anyway but clear sha1.
442 *
443 * Return 0 on success. On failure, write an error message to err and
444 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
445 *
446 * Implementation note: This function is basically
447 *
448 * lock reference
449 * read_raw_ref()
450 *
451 * but it includes a lot more code to
452 * - Deal with possible races with other processes
453 * - Avoid calling refs_verify_refname_available() when it can be
454 * avoided, namely if we were successfully able to read the ref
455 * - Generate informative error messages in the case of failure
456 */
457 static int lock_raw_ref(struct files_ref_store *refs,
458 const char *refname, int mustexist,
459 const struct string_list *extras,
460 const struct string_list *skip,
461 struct ref_lock **lock_p,
462 struct strbuf *referent,
463 unsigned int *type,
464 struct strbuf *err)
465 {
466 struct ref_lock *lock;
467 struct strbuf ref_file = STRBUF_INIT;
468 int attempts_remaining = 3;
469 int ret = TRANSACTION_GENERIC_ERROR;
470
471 assert(err);
472 files_assert_main_repository(refs, "lock_raw_ref");
473
474 *type = 0;
475
476 /* First lock the file so it can't change out from under us. */
477
478 *lock_p = lock = xcalloc(1, sizeof(*lock));
479
480 lock->ref_name = xstrdup(refname);
481 files_ref_path(refs, &ref_file, refname);
482
483 retry:
484 switch (safe_create_leading_directories(ref_file.buf)) {
485 case SCLD_OK:
486 break; /* success */
487 case SCLD_EXISTS:
488 /*
489 * Suppose refname is "refs/foo/bar". We just failed
490 * to create the containing directory, "refs/foo",
491 * because there was a non-directory in the way. This
492 * indicates a D/F conflict, probably because of
493 * another reference such as "refs/foo". There is no
494 * reason to expect this error to be transitory.
495 */
496 if (refs_verify_refname_available(&refs->base, refname,
497 extras, skip, err)) {
498 if (mustexist) {
499 /*
500 * To the user the relevant error is
501 * that the "mustexist" reference is
502 * missing:
503 */
504 strbuf_reset(err);
505 strbuf_addf(err, "unable to resolve reference '%s'",
506 refname);
507 } else {
508 /*
509 * The error message set by
510 * refs_verify_refname_available() is
511 * OK.
512 */
513 ret = TRANSACTION_NAME_CONFLICT;
514 }
515 } else {
516 /*
517 * The file that is in the way isn't a loose
518 * reference. Report it as a low-level
519 * failure.
520 */
521 strbuf_addf(err, "unable to create lock file %s.lock; "
522 "non-directory in the way",
523 ref_file.buf);
524 }
525 goto error_return;
526 case SCLD_VANISHED:
527 /* Maybe another process was tidying up. Try again. */
528 if (--attempts_remaining > 0)
529 goto retry;
530 /* fall through */
531 default:
532 strbuf_addf(err, "unable to create directory for %s",
533 ref_file.buf);
534 goto error_return;
535 }
536
537 if (!lock->lk)
538 lock->lk = xcalloc(1, sizeof(struct lock_file));
539
540 if (hold_lock_file_for_update(lock->lk, ref_file.buf, LOCK_NO_DEREF) < 0) {
541 if (errno == ENOENT && --attempts_remaining > 0) {
542 /*
543 * Maybe somebody just deleted one of the
544 * directories leading to ref_file. Try
545 * again:
546 */
547 goto retry;
548 } else {
549 unable_to_lock_message(ref_file.buf, errno, err);
550 goto error_return;
551 }
552 }
553
554 /*
555 * Now we hold the lock and can read the reference without
556 * fear that its value will change.
557 */
558
559 if (files_read_raw_ref(&refs->base, refname,
560 lock->old_oid.hash, referent, type)) {
561 if (errno == ENOENT) {
562 if (mustexist) {
563 /* Garden variety missing reference. */
564 strbuf_addf(err, "unable to resolve reference '%s'",
565 refname);
566 goto error_return;
567 } else {
568 /*
569 * Reference is missing, but that's OK. We
570 * know that there is not a conflict with
571 * another loose reference because
572 * (supposing that we are trying to lock
573 * reference "refs/foo/bar"):
574 *
575 * - We were successfully able to create
576 * the lockfile refs/foo/bar.lock, so we
577 * know there cannot be a loose reference
578 * named "refs/foo".
579 *
580 * - We got ENOENT and not EISDIR, so we
581 * know that there cannot be a loose
582 * reference named "refs/foo/bar/baz".
583 */
584 }
585 } else if (errno == EISDIR) {
586 /*
587 * There is a directory in the way. It might have
588 * contained references that have been deleted. If
589 * we don't require that the reference already
590 * exists, try to remove the directory so that it
591 * doesn't cause trouble when we want to rename the
592 * lockfile into place later.
593 */
594 if (mustexist) {
595 /* Garden variety missing reference. */
596 strbuf_addf(err, "unable to resolve reference '%s'",
597 refname);
598 goto error_return;
599 } else if (remove_dir_recursively(&ref_file,
600 REMOVE_DIR_EMPTY_ONLY)) {
601 if (refs_verify_refname_available(
602 &refs->base, refname,
603 extras, skip, err)) {
604 /*
605 * The error message set by
606 * verify_refname_available() is OK.
607 */
608 ret = TRANSACTION_NAME_CONFLICT;
609 goto error_return;
610 } else {
611 /*
612 * We can't delete the directory,
613 * but we also don't know of any
614 * references that it should
615 * contain.
616 */
617 strbuf_addf(err, "there is a non-empty directory '%s' "
618 "blocking reference '%s'",
619 ref_file.buf, refname);
620 goto error_return;
621 }
622 }
623 } else if (errno == EINVAL && (*type & REF_ISBROKEN)) {
624 strbuf_addf(err, "unable to resolve reference '%s': "
625 "reference broken", refname);
626 goto error_return;
627 } else {
628 strbuf_addf(err, "unable to resolve reference '%s': %s",
629 refname, strerror(errno));
630 goto error_return;
631 }
632
633 /*
634 * If the ref did not exist and we are creating it,
635 * make sure there is no existing packed ref that
636 * conflicts with refname:
637 */
638 if (refs_verify_refname_available(
639 refs->packed_ref_store, refname,
640 extras, skip, err))
641 goto error_return;
642 }
643
644 ret = 0;
645 goto out;
646
647 error_return:
648 unlock_ref(lock);
649 *lock_p = NULL;
650
651 out:
652 strbuf_release(&ref_file);
653 return ret;
654 }
655
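/*
 * Peel refname down to a non-tag object and store the result in sha1.
 * Returns 0 on success and -1 if the ref cannot be resolved or peeled.
 */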
656 static int files_peel_ref(struct ref_store *ref_store,
657 const char *refname, unsigned char *sha1)
658 {
659 struct files_ref_store *refs =
660 files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
661 "peel_ref");
662 int flag;
663 unsigned char base[20];
664
665 if (current_ref_iter && current_ref_iter->refname == refname) {
666 struct object_id peeled;
667
668 if (ref_iterator_peel(current_ref_iter, &peeled))
669 return -1;
670 hashcpy(sha1, peeled.hash);
671 return 0;
672 }
673
674 if (refs_read_ref_full(ref_store, refname,
675 RESOLVE_REF_READING, base, &flag))
676 return -1;
677
678 /*
679 * If the reference is packed, read its ref_entry from the
680 * cache in the hope that we already know its peeled value.
681 * We only try this optimization on packed references because
682 * (a) forcing the filling of the loose reference cache could
683 * be expensive and (b) loose references anyway usually do not
684 * have REF_KNOWS_PEELED.
685 */
686 if (flag & REF_ISPACKED &&
687 !refs_peel_ref(refs->packed_ref_store, refname, sha1))
688 return 0;
689
690 return peel_object(base, sha1);
691 }
692
693 struct files_ref_iterator {
694 struct ref_iterator base;
695
696 struct ref_iterator *iter0;
697 unsigned int flags;
698 };
699
700 static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
701 {
702 struct files_ref_iterator *iter =
703 (struct files_ref_iterator *)ref_iterator;
704 int ok;
705
706 while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
707 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
708 ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
709 continue;
710
711 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
712 !ref_resolves_to_object(iter->iter0->refname,
713 iter->iter0->oid,
714 iter->iter0->flags))
715 continue;
716
717 iter->base.refname = iter->iter0->refname;
718 iter->base.oid = iter->iter0->oid;
719 iter->base.flags = iter->iter0->flags;
720 return ITER_OK;
721 }
722
723 iter->iter0 = NULL;
724 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
725 ok = ITER_ERROR;
726
727 return ok;
728 }
729
730 static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
731 struct object_id *peeled)
732 {
733 struct files_ref_iterator *iter =
734 (struct files_ref_iterator *)ref_iterator;
735
736 return ref_iterator_peel(iter->iter0, peeled);
737 }
738
739 static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
740 {
741 struct files_ref_iterator *iter =
742 (struct files_ref_iterator *)ref_iterator;
743 int ok = ITER_DONE;
744
745 if (iter->iter0)
746 ok = ref_iterator_abort(iter->iter0);
747
748 base_ref_iterator_free(ref_iterator);
749 return ok;
750 }
751
752 static struct ref_iterator_vtable files_ref_iterator_vtable = {
753 files_ref_iterator_advance,
754 files_ref_iterator_peel,
755 files_ref_iterator_abort
756 };
757
758 static struct ref_iterator *files_ref_iterator_begin(
759 struct ref_store *ref_store,
760 const char *prefix, unsigned int flags)
761 {
762 struct files_ref_store *refs;
763 struct ref_iterator *loose_iter, *packed_iter;
764 struct files_ref_iterator *iter;
765 struct ref_iterator *ref_iterator;
766 unsigned int required_flags = REF_STORE_READ;
767
768 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
769 required_flags |= REF_STORE_ODB;
770
771 refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
772
773 iter = xcalloc(1, sizeof(*iter));
774 ref_iterator = &iter->base;
775 base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
776
777 /*
778 * We must make sure that all loose refs are read before
779 * accessing the packed-refs file; this avoids a race
780 * condition if loose refs are migrated to the packed-refs
781 * file by a simultaneous process, but our in-memory view is
782 * from before the migration. We ensure this as follows:
783 * First, we start the loose refs iteration with its
784 * `prime_ref` argument set to true. This causes the loose
785 * references in the subtree to be pre-read into the cache.
786 * (If they've already been read, that's OK; we only need to
787 * guarantee that they're read before the packed refs, not
788 * *how much* before.) After that, we call
789 * packed_ref_iterator_begin(), which internally checks
790 * whether the packed-ref cache is up to date with what is on
791 * disk, and re-reads it if not.
792 */
793
794 loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
795 prefix, 1);
796
797 /*
798 * The packed-refs file might contain broken references, for
799 * example an old version of a reference that points at an
800 * object that has since been garbage-collected. This is OK as
801 * long as there is a corresponding loose reference that
802 * overrides it, and we don't want to emit an error message in
803 * this case. So ask the packed_ref_store for all of its
804 * references, and (if needed) do our own check for broken
805 * ones in files_ref_iterator_advance(), after we have merged
806 * the packed and loose references.
807 */
808 packed_iter = refs_ref_iterator_begin(
809 refs->packed_ref_store, prefix, 0,
810 DO_FOR_EACH_INCLUDE_BROKEN);
811
812 iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
813 iter->flags = flags;
814
815 return ref_iterator;
816 }
817
818 /*
819 * Verify that the reference locked by lock has the value old_sha1 (unless it is NULL).
820 * Fail if the reference doesn't exist and mustexist is set. Return 0
821 * on success. On error, write an error message to err, set errno, and
822 * return a negative value.
823 */
824 static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
825 const unsigned char *old_sha1, int mustexist,
826 struct strbuf *err)
827 {
828 assert(err);
829
830 if (refs_read_ref_full(ref_store, lock->ref_name,
831 mustexist ? RESOLVE_REF_READING : 0,
832 lock->old_oid.hash, NULL)) {
833 if (old_sha1) {
834 int save_errno = errno;
835 strbuf_addf(err, "can't verify ref '%s'", lock->ref_name);
836 errno = save_errno;
837 return -1;
838 } else {
839 oidclr(&lock->old_oid);
840 return 0;
841 }
842 }
843 if (old_sha1 && hashcmp(lock->old_oid.hash, old_sha1)) {
844 strbuf_addf(err, "ref '%s' is at %s but expected %s",
845 lock->ref_name,
846 oid_to_hex(&lock->old_oid),
847 sha1_to_hex(old_sha1));
848 errno = EBUSY;
849 return -1;
850 }
851 return 0;
852 }
853
854 static int remove_empty_directories(struct strbuf *path)
855 {
856 /*
857 * we want to create a file but there is a directory there;
858 * if that is an empty directory (or a directory that contains
859 * only empty directories), remove it.
860 */
861 return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);
862 }
863
864 static int create_reflock(const char *path, void *cb)
865 {
866 struct lock_file *lk = cb;
867
868 return hold_lock_file_for_update(lk, path, LOCK_NO_DEREF) < 0 ? -1 : 0;
869 }
870
871 /*
872 * Lock a ref, returning the lock on success and NULL on failure.
873 * On failure, errno is set to something meaningful.
874 */
875 static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs,
876 const char *refname,
877 const unsigned char *old_sha1,
878 const struct string_list *extras,
879 const struct string_list *skip,
880 unsigned int flags, int *type,
881 struct strbuf *err)
882 {
883 struct strbuf ref_file = STRBUF_INIT;
884 struct ref_lock *lock;
885 int last_errno = 0;
886 int mustexist = (old_sha1 && !is_null_sha1(old_sha1));
887 int resolve_flags = RESOLVE_REF_NO_RECURSE;
888 int resolved;
889
890 files_assert_main_repository(refs, "lock_ref_sha1_basic");
891 assert(err);
892
893 lock = xcalloc(1, sizeof(struct ref_lock));
894
895 if (mustexist)
896 resolve_flags |= RESOLVE_REF_READING;
897 if (flags & REF_DELETING)
898 resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME;
899
900 files_ref_path(refs, &ref_file, refname);
901 resolved = !!refs_resolve_ref_unsafe(&refs->base,
902 refname, resolve_flags,
903 lock->old_oid.hash, type);
904 if (!resolved && errno == EISDIR) {
905 /*
906 * we are trying to lock foo but we used to
907 * have foo/bar which now does not exist;
908 * it is normal for the empty directory 'foo'
909 * to remain.
910 */
911 if (remove_empty_directories(&ref_file)) {
912 last_errno = errno;
913 if (!refs_verify_refname_available(
914 &refs->base,
915 refname, extras, skip, err))
916 strbuf_addf(err, "there are still refs under '%s'",
917 refname);
918 goto error_return;
919 }
920 resolved = !!refs_resolve_ref_unsafe(&refs->base,
921 refname, resolve_flags,
922 lock->old_oid.hash, type);
923 }
924 if (!resolved) {
925 last_errno = errno;
926 if (last_errno != ENOTDIR ||
927 !refs_verify_refname_available(&refs->base, refname,
928 extras, skip, err))
929 strbuf_addf(err, "unable to resolve reference '%s': %s",
930 refname, strerror(last_errno));
931
932 goto error_return;
933 }
934
935 /*
936 * If the ref did not exist and we are creating it, make sure
937 * there is no existing packed ref whose name begins with our
938 * refname, nor a packed ref whose name is a proper prefix of
939 * our refname.
940 */
941 if (is_null_oid(&lock->old_oid) &&
942 refs_verify_refname_available(refs->packed_ref_store, refname,
943 extras, skip, err)) {
944 last_errno = ENOTDIR;
945 goto error_return;
946 }
947
948 lock->lk = xcalloc(1, sizeof(struct lock_file));
949
950 lock->ref_name = xstrdup(refname);
951
952 if (raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) {
953 last_errno = errno;
954 unable_to_lock_message(ref_file.buf, errno, err);
955 goto error_return;
956 }
957
958 if (verify_lock(&refs->base, lock, old_sha1, mustexist, err)) {
959 last_errno = errno;
960 goto error_return;
961 }
962 goto out;
963
964 error_return:
965 unlock_ref(lock);
966 lock = NULL;
967
968 out:
969 strbuf_release(&ref_file);
970 errno = last_errno;
971 return lock;
972 }
973
974 struct ref_to_prune {
975 struct ref_to_prune *next;
976 unsigned char sha1[20];
977 char name[FLEX_ARRAY];
978 };
979
980 enum {
981 REMOVE_EMPTY_PARENTS_REF = 0x01,
982 REMOVE_EMPTY_PARENTS_REFLOG = 0x02
983 };
984
985 /*
986 * Remove empty parent directories associated with the specified
987 * reference and/or its reflog, but spare [logs/]refs/ and immediate
988 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
989 * REMOVE_EMPTY_PARENTS_REFLOG.
990 */
991 static void try_remove_empty_parents(struct files_ref_store *refs,
992 const char *refname,
993 unsigned int flags)
994 {
995 struct strbuf buf = STRBUF_INIT;
996 struct strbuf sb = STRBUF_INIT;
997 char *p, *q;
998 int i;
999
1000 strbuf_addstr(&buf, refname);
1001 p = buf.buf;
1002 for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
1003 while (*p && *p != '/')
1004 p++;
1005 /* tolerate duplicate slashes; see check_refname_format() */
1006 while (*p == '/')
1007 p++;
1008 }
1009 q = buf.buf + buf.len;
1010 while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
1011 while (q > p && *q != '/')
1012 q--;
1013 while (q > p && *(q-1) == '/')
1014 q--;
1015 if (q == p)
1016 break;
1017 strbuf_setlen(&buf, q - buf.buf);
1018
1019 strbuf_reset(&sb);
1020 files_ref_path(refs, &sb, buf.buf);
1021 if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
1022 flags &= ~REMOVE_EMPTY_PARENTS_REF;
1023
1024 strbuf_reset(&sb);
1025 files_reflog_path(refs, &sb, buf.buf);
1026 if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
1027 flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
1028 }
1029 strbuf_release(&buf);
1030 strbuf_release(&sb);
1031 }
1032
1033 /* make sure nobody touched the ref, and unlink */
1034 static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
1035 {
1036 struct ref_transaction *transaction;
1037 struct strbuf err = STRBUF_INIT;
1038
1039 if (check_refname_format(r->name, 0))
1040 return;
1041
1042 transaction = ref_store_transaction_begin(&refs->base, &err);
1043 if (!transaction ||
1044 ref_transaction_delete(transaction, r->name, r->sha1,
1045 REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
1046 ref_transaction_commit(transaction, &err)) {
1047 ref_transaction_free(transaction);
1048 error("%s", err.buf);
1049 strbuf_release(&err);
1050 return;
1051 }
1052 ref_transaction_free(transaction);
1053 strbuf_release(&err);
1054 }
1055
1056 static void prune_refs(struct files_ref_store *refs, struct ref_to_prune *r)
1057 {
1058 while (r) {
1059 prune_ref(refs, r);
1060 r = r->next;
1061 }
1062 }
1063
1064 /*
1065 * Return true if the specified reference should be packed.
1066 */
1067 static int should_pack_ref(const char *refname,
1068 const struct object_id *oid, unsigned int ref_flags,
1069 unsigned int pack_flags)
1070 {
1071 /* Do not pack per-worktree refs: */
1072 if (ref_type(refname) != REF_TYPE_NORMAL)
1073 return 0;
1074
1075 /* Do not pack non-tags unless PACK_REFS_ALL is set: */
1076 if (!(pack_flags & PACK_REFS_ALL) && !starts_with(refname, "refs/tags/"))
1077 return 0;
1078
1079 /* Do not pack symbolic refs: */
1080 if (ref_flags & REF_ISSYMREF)
1081 return 0;
1082
1083 /* Do not pack broken refs: */
1084 if (!ref_resolves_to_object(refname, oid, ref_flags))
1085 return 0;
1086
1087 return 1;
1088 }
1089
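/*
 * Migrate all packable loose refs into the packed-refs file, then prune
 * the corresponding loose refs if PACK_REFS_PRUNE is set in flags.
 */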
1090 static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
1091 {
1092 struct files_ref_store *refs =
1093 files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
1094 "pack_refs");
1095 struct ref_iterator *iter;
1096 int ok;
1097 struct ref_to_prune *refs_to_prune = NULL;
1098 struct strbuf err = STRBUF_INIT;
1099
1100 packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
1101
1102 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL, 0);
1103 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
1104 /*
1105 * If the loose reference can be packed, add an entry
1106 * in the packed ref cache. If the reference should be
1107 * pruned, also add it to refs_to_prune.
1108 */
1109 if (!should_pack_ref(iter->refname, iter->oid, iter->flags,
1110 flags))
1111 continue;
1112
1113 /*
1114 * Create an entry in the packed-refs cache equivalent
1115 * to the one from the loose ref cache, except that
1116 * we don't copy the peeled status, because we want it
1117 * to be re-peeled.
1118 */
1119 add_packed_ref(refs->packed_ref_store, iter->refname, iter->oid);
1120
1121 /* Schedule the loose reference for pruning if requested. */
1122 if ((flags & PACK_REFS_PRUNE)) {
1123 struct ref_to_prune *n;
1124 FLEX_ALLOC_STR(n, name, iter->refname);
1125 hashcpy(n->sha1, iter->oid->hash);
1126 n->next = refs_to_prune;
1127 refs_to_prune = n;
1128 }
1129 }
1130 if (ok != ITER_DONE)
1131 die("error while iterating over references");
1132
1133 if (commit_packed_refs(refs->packed_ref_store, &err))
1134 die("unable to overwrite old ref-pack file: %s", err.buf);
1135 packed_refs_unlock(refs->packed_ref_store);
1136
1137 prune_refs(refs, refs_to_prune);
1138 strbuf_release(&err);
1139 return 0;
1140 }
1141
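/*
 * Delete the given refnames: first rewrite packed-refs without them (so
 * that no stale packed value can be exposed), then remove the loose refs
 * one by one.
 */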
1142 static int files_delete_refs(struct ref_store *ref_store, const char *msg,
1143 struct string_list *refnames, unsigned int flags)
1144 {
1145 struct files_ref_store *refs =
1146 files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1147 struct strbuf err = STRBUF_INIT;
1148 int i, result = 0;
1149
1150 if (!refnames->nr)
1151 return 0;
1152
1153 if (packed_refs_lock(refs->packed_ref_store, 0, &err))
1154 goto error;
1155
1156 if (repack_without_refs(refs->packed_ref_store, refnames, &err)) {
1157 packed_refs_unlock(refs->packed_ref_store);
1158 goto error;
1159 }
1160
1161 packed_refs_unlock(refs->packed_ref_store);
1162
1163 for (i = 0; i < refnames->nr; i++) {
1164 const char *refname = refnames->items[i].string;
1165
1166 if (refs_delete_ref(&refs->base, msg, refname, NULL, flags))
1167 result |= error(_("could not remove reference %s"), refname);
1168 }
1169
1170 strbuf_release(&err);
1171 return result;
1172
1173 error:
1174 /*
1175 * If we failed to rewrite the packed-refs file, then it is
1176 * unsafe to try to remove loose refs, because doing so might
1177 * expose an obsolete packed value for a reference that might
1178 * even point at an object that has been garbage collected.
1179 */
1180 if (refnames->nr == 1)
1181 error(_("could not delete reference %s: %s"),
1182 refnames->items[0].string, err.buf);
1183 else
1184 error(_("could not delete references: %s"), err.buf);
1185
1186 strbuf_release(&err);
1187 return -1;
1188 }
1189
1190 /*
1191 * People using contrib's git-new-workdir have .git/logs/refs ->
1192 * /some/other/path/.git/logs/refs, and that may live on another device.
1193 *
1194 * IOW, to avoid cross-device rename errors, the temporary renamed log must
1195 * live within logs/refs.
1196 */
1197 #define TMP_RENAMED_LOG "refs/.tmp-renamed-log"
1198
1199 struct rename_cb {
1200 const char *tmp_renamed_log;
1201 int true_errno;
1202 };
1203
1204 static int rename_tmp_log_callback(const char *path, void *cb_data)
1205 {
1206 struct rename_cb *cb = cb_data;
1207
1208 if (rename(cb->tmp_renamed_log, path)) {
1209 /*
1210 * rename(a, b) when b is an existing directory ought
1211 * to result in EISDIR, but Solaris 5.8 gives ENOTDIR.
1212 * Sheesh. Record the true errno for error reporting,
1213 * but report EISDIR to raceproof_create_file() so
1214 * that it knows to retry.
1215 */
1216 cb->true_errno = errno;
1217 if (errno == ENOTDIR)
1218 errno = EISDIR;
1219 return -1;
1220 } else {
1221 return 0;
1222 }
1223 }
1224
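/*
 * Move the reflog that was temporarily renamed to TMP_RENAMED_LOG into
 * place as the reflog for newrefname.
 */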
1225 static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
1226 {
1227 struct strbuf path = STRBUF_INIT;
1228 struct strbuf tmp = STRBUF_INIT;
1229 struct rename_cb cb;
1230 int ret;
1231
1232 files_reflog_path(refs, &path, newrefname);
1233 files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
1234 cb.tmp_renamed_log = tmp.buf;
1235 ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
1236 if (ret) {
1237 if (errno == EISDIR)
1238 error("directory not empty: %s", path.buf);
1239 else
1240 error("unable to move logfile %s to %s: %s",
1241 tmp.buf, path.buf,
1242 strerror(cb.true_errno));
1243 }
1244
1245 strbuf_release(&path);
1246 strbuf_release(&tmp);
1247 return ret;
1248 }
1249
1250 static int write_ref_to_lockfile(struct ref_lock *lock,
1251 const struct object_id *oid, struct strbuf *err);
1252 static int commit_ref_update(struct files_ref_store *refs,
1253 struct ref_lock *lock,
1254 const struct object_id *oid, const char *logmsg,
1255 struct strbuf *err);
1256
1257 static int files_rename_ref(struct ref_store *ref_store,
1258 const char *oldrefname, const char *newrefname,
1259 const char *logmsg)
1260 {
1261 struct files_ref_store *refs =
1262 files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1263 struct object_id oid, orig_oid;
1264 int flag = 0, logmoved = 0;
1265 struct ref_lock *lock;
1266 struct stat loginfo;
1267 struct strbuf sb_oldref = STRBUF_INIT;
1268 struct strbuf sb_newref = STRBUF_INIT;
1269 struct strbuf tmp_renamed_log = STRBUF_INIT;
1270 int log, ret;
1271 struct strbuf err = STRBUF_INIT;
1272
1273 files_reflog_path(refs, &sb_oldref, oldrefname);
1274 files_reflog_path(refs, &sb_newref, newrefname);
1275 files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);
1276
1277 log = !lstat(sb_oldref.buf, &loginfo);
1278 if (log && S_ISLNK(loginfo.st_mode)) {
1279 ret = error("reflog for %s is a symlink", oldrefname);
1280 goto out;
1281 }
1282
1283 if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
1284 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1285 orig_oid.hash, &flag)) {
1286 ret = error("refname %s not found", oldrefname);
1287 goto out;
1288 }
1289
1290 if (flag & REF_ISSYMREF) {
1291 ret = error("refname %s is a symbolic ref, renaming it is not supported",
1292 oldrefname);
1293 goto out;
1294 }
1295 if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
1296 ret = 1;
1297 goto out;
1298 }
1299
1300 if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
1301 ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1302 oldrefname, strerror(errno));
1303 goto out;
1304 }
1305
1306 if (refs_delete_ref(&refs->base, logmsg, oldrefname,
1307 orig_oid.hash, REF_NODEREF)) {
1308 error("unable to delete old %s", oldrefname);
1309 goto rollback;
1310 }
1311
1312 /*
1313 * Since we are doing a shallow lookup, oid is not the
1314 * correct value to pass to delete_ref as old_oid. But that
1315 * doesn't matter, because an old_oid check wouldn't add to
1316 * the safety anyway; we want to delete the reference whatever
1317 * its current value.
1318 */
1319 if (!refs_read_ref_full(&refs->base, newrefname,
1320 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1321 oid.hash, NULL) &&
1322 refs_delete_ref(&refs->base, NULL, newrefname,
1323 NULL, REF_NODEREF)) {
1324 if (errno == EISDIR) {
1325 struct strbuf path = STRBUF_INIT;
1326 int result;
1327
1328 files_ref_path(refs, &path, newrefname);
1329 result = remove_empty_directories(&path);
1330 strbuf_release(&path);
1331
1332 if (result) {
1333 error("Directory not empty: %s", newrefname);
1334 goto rollback;
1335 }
1336 } else {
1337 error("unable to delete existing %s", newrefname);
1338 goto rollback;
1339 }
1340 }
1341
1342 if (log && rename_tmp_log(refs, newrefname))
1343 goto rollback;
1344
1345 logmoved = log;
1346
1347 lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
1348 REF_NODEREF, NULL, &err);
1349 if (!lock) {
1350 error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1351 strbuf_release(&err);
1352 goto rollback;
1353 }
1354 oidcpy(&lock->old_oid, &orig_oid);
1355
1356 if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
1357 commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
1358 error("unable to write current sha1 into %s: %s", newrefname, err.buf);
1359 strbuf_release(&err);
1360 goto rollback;
1361 }
1362
1363 ret = 0;
1364 goto out;
1365
1366 rollback:
1367 lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL,
1368 REF_NODEREF, NULL, &err);
1369 if (!lock) {
1370 error("unable to lock %s for rollback: %s", oldrefname, err.buf);
1371 strbuf_release(&err);
1372 goto rollbacklog;
1373 }
1374
1375 flag = log_all_ref_updates;
1376 log_all_ref_updates = LOG_REFS_NONE;
1377 if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
1378 commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
1379 error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
1380 strbuf_release(&err);
1381 }
1382 log_all_ref_updates = flag;
1383
1384 rollbacklog:
1385 if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
1386 error("unable to restore logfile %s from %s: %s",
1387 oldrefname, newrefname, strerror(errno));
1388 if (!logmoved && log &&
1389 rename(tmp_renamed_log.buf, sb_oldref.buf))
1390 error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
1391 oldrefname, strerror(errno));
1392 ret = 1;
1393 out:
1394 strbuf_release(&sb_newref);
1395 strbuf_release(&sb_oldref);
1396 strbuf_release(&tmp_renamed_log);
1397
1398 return ret;
1399 }
1400
1401 static int close_ref(struct ref_lock *lock)
1402 {
1403 if (close_lock_file(lock->lk))
1404 return -1;
1405 return 0;
1406 }
1407
1408 static int commit_ref(struct ref_lock *lock)
1409 {
1410 char *path = get_locked_file_path(lock->lk);
1411 struct stat st;
1412
1413 if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
1414 /*
1415 * There is a directory at the path we want to rename
1416 * the lockfile to. Hopefully it is empty; try to
1417 * delete it.
1418 */
1419 size_t len = strlen(path);
1420 struct strbuf sb_path = STRBUF_INIT;
1421
1422 strbuf_attach(&sb_path, path, len, len);
1423
1424 /*
1425 * If this fails, commit_lock_file() will also fail
1426 * and will report the problem.
1427 */
1428 remove_empty_directories(&sb_path);
1429 strbuf_release(&sb_path);
1430 } else {
1431 free(path);
1432 }
1433
1434 if (commit_lock_file(lock->lk))
1435 return -1;
1436 return 0;
1437 }
1438
1439 static int open_or_create_logfile(const char *path, void *cb)
1440 {
1441 int *fd = cb;
1442
1443 *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
1444 return (*fd < 0) ? -1 : 0;
1445 }
1446
1447 /*
1448 * Create a reflog for a ref. If force_create = 0, only create the
1449 * reflog for certain refs (those for which should_autocreate_reflog
1450 * returns non-zero). Otherwise, create it regardless of the reference
1451 * name. If the logfile already existed or was created, return 0 and
1452 * set *logfd to the file descriptor opened for appending to the file.
1453 * If no logfile exists and we decided not to create one, return 0 and
1454 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
1455 * return -1.
1456 */
1457 static int log_ref_setup(struct files_ref_store *refs,
1458 const char *refname, int force_create,
1459 int *logfd, struct strbuf *err)
1460 {
1461 struct strbuf logfile_sb = STRBUF_INIT;
1462 char *logfile;
1463
1464 files_reflog_path(refs, &logfile_sb, refname);
1465 logfile = strbuf_detach(&logfile_sb, NULL);
1466
1467 if (force_create || should_autocreate_reflog(refname)) {
1468 if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
1469 if (errno == ENOENT)
1470 strbuf_addf(err, "unable to create directory for '%s': "
1471 "%s", logfile, strerror(errno));
1472 else if (errno == EISDIR)
1473 strbuf_addf(err, "there are still logs under '%s'",
1474 logfile);
1475 else
1476 strbuf_addf(err, "unable to append to '%s': %s",
1477 logfile, strerror(errno));
1478
1479 goto error;
1480 }
1481 } else {
1482 *logfd = open(logfile, O_APPEND | O_WRONLY, 0666);
1483 if (*logfd < 0) {
1484 if (errno == ENOENT || errno == EISDIR) {
1485 /*
1486 * The logfile doesn't already exist,
1487 * but that is not an error; it only
1488 * means that we won't write log
1489 * entries to it.
1490 */
1491 ;
1492 } else {
1493 strbuf_addf(err, "unable to append to '%s': %s",
1494 logfile, strerror(errno));
1495 goto error;
1496 }
1497 }
1498 }
1499
1500 if (*logfd >= 0)
1501 adjust_shared_perm(logfile);
1502
1503 free(logfile);
1504 return 0;
1505
1506 error:
1507 free(logfile);
1508 return -1;
1509 }
1510
1511 static int files_create_reflog(struct ref_store *ref_store,
1512 const char *refname, int force_create,
1513 struct strbuf *err)
1514 {
1515 struct files_ref_store *refs =
1516 files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1517 int fd;
1518
1519 if (log_ref_setup(refs, refname, force_create, &fd, err))
1520 return -1;
1521
1522 if (fd >= 0)
1523 close(fd);
1524
1525 return 0;
1526 }
1527
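/*
 * Format one reflog record (old oid, new oid, committer, message) and
 * write it to the already-open file descriptor fd.
 */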
1528 static int log_ref_write_fd(int fd, const struct object_id *old_oid,
1529 const struct object_id *new_oid,
1530 const char *committer, const char *msg)
1531 {
1532 int msglen, written;
1533 unsigned maxlen, len;
1534 char *logrec;
1535
1536 msglen = msg ? strlen(msg) : 0;
1537 maxlen = strlen(committer) + msglen + 100;
1538 logrec = xmalloc(maxlen);
1539 len = xsnprintf(logrec, maxlen, "%s %s %s\n",
1540 oid_to_hex(old_oid),
1541 oid_to_hex(new_oid),
1542 committer);
1543 if (msglen)
1544 len += copy_reflog_msg(logrec + len - 1, msg) - 1;
1545
1546 written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
1547 free(logrec);
1548 if (written != len)
1549 return -1;
1550
1551 return 0;
1552 }
1553
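/*
 * Append an entry for the update old_oid -> new_oid to refname's reflog,
 * creating the log first if needed (see log_ref_setup()).
 */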
1554 static int files_log_ref_write(struct files_ref_store *refs,
1555 const char *refname, const struct object_id *old_oid,
1556 const struct object_id *new_oid, const char *msg,
1557 int flags, struct strbuf *err)
1558 {
1559 int logfd, result;
1560
1561 if (log_all_ref_updates == LOG_REFS_UNSET)
1562 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
1563
1564 result = log_ref_setup(refs, refname,
1565 flags & REF_FORCE_CREATE_REFLOG,
1566 &logfd, err);
1567
1568 if (result)
1569 return result;
1570
1571 if (logfd < 0)
1572 return 0;
1573 result = log_ref_write_fd(logfd, old_oid, new_oid,
1574 git_committer_info(0), msg);
1575 if (result) {
1576 struct strbuf sb = STRBUF_INIT;
1577 int save_errno = errno;
1578
1579 files_reflog_path(refs, &sb, refname);
1580 strbuf_addf(err, "unable to append to '%s': %s",
1581 sb.buf, strerror(save_errno));
1582 strbuf_release(&sb);
1583 close(logfd);
1584 return -1;
1585 }
1586 if (close(logfd)) {
1587 struct strbuf sb = STRBUF_INIT;
1588 int save_errno = errno;
1589
1590 files_reflog_path(refs, &sb, refname);
1591 strbuf_addf(err, "unable to append to '%s': %s",
1592 sb.buf, strerror(save_errno));
1593 strbuf_release(&sb);
1594 return -1;
1595 }
1596 return 0;
1597 }
1598
1599 /*
1600 * Write sha1 into the open lockfile, then close the lockfile. On
1601 * errors, roll back the lockfile, fill in *err and
1602 * return -1.
1603 */
1604 static int write_ref_to_lockfile(struct ref_lock *lock,
1605 const struct object_id *oid, struct strbuf *err)
1606 {
1607 static char term = '\n';
1608 struct object *o;
1609 int fd;
1610
1611 o = parse_object(oid);
1612 if (!o) {
1613 strbuf_addf(err,
1614 "trying to write ref '%s' with nonexistent object %s",
1615 lock->ref_name, oid_to_hex(oid));
1616 unlock_ref(lock);
1617 return -1;
1618 }
1619 if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
1620 strbuf_addf(err,
1621 "trying to write non-commit object %s to branch '%s'",
1622 oid_to_hex(oid), lock->ref_name);
1623 unlock_ref(lock);
1624 return -1;
1625 }
1626 fd = get_lock_file_fd(lock->lk);
1627 if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
1628 write_in_full(fd, &term, 1) != 1 ||
1629 close_ref(lock) < 0) {
1630 strbuf_addf(err,
1631 "couldn't write '%s'", get_lock_file_path(lock->lk));
1632 unlock_ref(lock);
1633 return -1;
1634 }
1635 return 0;
1636 }
1637
1638 /*
1639 * Commit a change to a loose reference that has already been written
1640 * to the loose reference lockfile. Also update the reflogs if
1641 * necessary, using the specified logmsg (which can be NULL).
1642 */
1643 static int commit_ref_update(struct files_ref_store *refs,
1644 struct ref_lock *lock,
1645 const struct object_id *oid, const char *logmsg,
1646 struct strbuf *err)
1647 {
1648 files_assert_main_repository(refs, "commit_ref_update");
1649
1650 clear_loose_ref_cache(refs);
1651 if (files_log_ref_write(refs, lock->ref_name,
1652 &lock->old_oid, oid,
1653 logmsg, 0, err)) {
1654 char *old_msg = strbuf_detach(err, NULL);
1655 strbuf_addf(err, "cannot update the ref '%s': %s",
1656 lock->ref_name, old_msg);
1657 free(old_msg);
1658 unlock_ref(lock);
1659 return -1;
1660 }
1661
1662 if (strcmp(lock->ref_name, "HEAD") != 0) {
1663 /*
1664 * Special hack: If a branch is updated directly and HEAD
1665 * points to it (may happen on the remote side of a push
1666 * for example) then logically the HEAD reflog should be
1667 * updated too.
1668 * A generic solution implies reverse symref information,
1669 * but finding all symrefs pointing to the given branch
1670 * would be rather costly for this rare event (the direct
1671 * update of a branch) to be worth it. So let's cheat and
1672 * check with HEAD only which should cover 99% of all usage
1673 * scenarios (even 100% of the default ones).
1674 */
1675 struct object_id head_oid;
1676 int head_flag;
1677 const char *head_ref;
1678
1679 head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
1680 RESOLVE_REF_READING,
1681 head_oid.hash, &head_flag);
1682 if (head_ref && (head_flag & REF_ISSYMREF) &&
1683 !strcmp(head_ref, lock->ref_name)) {
1684 struct strbuf log_err = STRBUF_INIT;
1685 if (files_log_ref_write(refs, "HEAD",
1686 &lock->old_oid, oid,
1687 logmsg, 0, &log_err)) {
1688 error("%s", log_err.buf);
1689 strbuf_release(&log_err);
1690 }
1691 }
1692 }
1693
1694 if (commit_ref(lock)) {
1695 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
1696 unlock_ref(lock);
1697 return -1;
1698 }
1699
1700 unlock_ref(lock);
1701 return 0;
1702 }
1703
1704 static int create_ref_symlink(struct ref_lock *lock, const char *target)
1705 {
1706 int ret = -1;
1707 #ifndef NO_SYMLINK_HEAD
1708 char *ref_path = get_locked_file_path(lock->lk);
1709 unlink(ref_path);
1710 ret = symlink(target, ref_path);
1711 free(ref_path);
1712
1713 if (ret)
1714 fprintf(stderr, "no symlink - falling back to symbolic ref\n");
1715 #endif
1716 return ret;
1717 }
1718
1719 static void update_symref_reflog(struct files_ref_store *refs,
1720 struct ref_lock *lock, const char *refname,
1721 const char *target, const char *logmsg)
1722 {
1723 struct strbuf err = STRBUF_INIT;
1724 struct object_id new_oid;
1725 if (logmsg &&
1726 !refs_read_ref_full(&refs->base, target,
1727 RESOLVE_REF_READING, new_oid.hash, NULL) &&
1728 files_log_ref_write(refs, refname, &lock->old_oid,
1729 &new_oid, logmsg, 0, &err)) {
1730 error("%s", err.buf);
1731 strbuf_release(&err);
1732 }
1733 }
1734
1735 static int create_symref_locked(struct files_ref_store *refs,
1736 struct ref_lock *lock, const char *refname,
1737 const char *target, const char *logmsg)
1738 {
1739 if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
1740 update_symref_reflog(refs, lock, refname, target, logmsg);
1741 return 0;
1742 }
1743
1744 if (!fdopen_lock_file(lock->lk, "w"))
1745 return error("unable to fdopen %s: %s",
1746 lock->lk->tempfile.filename.buf, strerror(errno));
1747
1748 update_symref_reflog(refs, lock, refname, target, logmsg);
1749
1750 /* no error check; commit_ref will check ferror */
1751 fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
1752 if (commit_ref(lock) < 0)
1753 return error("unable to write symref for %s: %s", refname,
1754 strerror(errno));
1755 return 0;
1756 }
1757
1758 static int files_create_symref(struct ref_store *ref_store,
1759 const char *refname, const char *target,
1760 const char *logmsg)
1761 {
1762 struct files_ref_store *refs =
1763 files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1764 struct strbuf err = STRBUF_INIT;
1765 struct ref_lock *lock;
1766 int ret;
1767
1768 lock = lock_ref_sha1_basic(refs, refname, NULL,
1769 NULL, NULL, REF_NODEREF, NULL,
1770 &err);
1771 if (!lock) {
1772 error("%s", err.buf);
1773 strbuf_release(&err);
1774 return -1;
1775 }
1776
1777 ret = create_symref_locked(refs, lock, refname, target, logmsg);
1778 unlock_ref(lock);
1779 return ret;
1780 }
1781
1782 static int files_reflog_exists(struct ref_store *ref_store,
1783 const char *refname)
1784 {
1785 struct files_ref_store *refs =
1786 files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1787 struct strbuf sb = STRBUF_INIT;
1788 struct stat st;
1789 int ret;
1790
1791 files_reflog_path(refs, &sb, refname);
1792 ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
1793 strbuf_release(&sb);
1794 return ret;
1795 }
1796
1797 static int files_delete_reflog(struct ref_store *ref_store,
1798 const char *refname)
1799 {
1800 struct files_ref_store *refs =
1801 files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1802 struct strbuf sb = STRBUF_INIT;
1803 int ret;
1804
1805 files_reflog_path(refs, &sb, refname);
1806 ret = remove_path(sb.buf);
1807 strbuf_release(&sb);
1808 return ret;
1809 }
1810
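/*
 * Parse one reflog line from sb and pass its fields to fn; malformed
 * lines are silently skipped (returning 0).
 */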
1811 static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
1812 {
1813 struct object_id ooid, noid;
1814 char *email_end, *message;
1815 timestamp_t timestamp;
1816 int tz;
1817 const char *p = sb->buf;
1818
1819 /* old SP new SP name <email> SP time TAB msg LF */
1820 if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
1821 parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
1822 parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
1823 !(email_end = strchr(p, '>')) ||
1824 email_end[1] != ' ' ||
1825 !(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
1826 !message || message[0] != ' ' ||
1827 (message[1] != '+' && message[1] != '-') ||
1828 !isdigit(message[2]) || !isdigit(message[3]) ||
1829 !isdigit(message[4]) || !isdigit(message[5]))
1830 return 0; /* corrupt? */
1831 email_end[1] = '\0';
1832 tz = strtol(message + 1, NULL, 10);
1833 if (message[6] != '\t')
1834 message += 6;
1835 else
1836 message += 7;
1837 return fn(&ooid, &noid, p, timestamp, tz, message, cb_data);
1838 }
1839
1840 static char *find_beginning_of_line(char *bob, char *scan)
1841 {
1842 while (bob < scan && *(--scan) != '\n')
1843 ; /* keep scanning backwards */
1844 /*
1845 * Return either beginning of the buffer, or LF at the end of
1846 * the previous line.
1847 */
1848 return scan;
1849 }
1850
1851 static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1852 const char *refname,
1853 each_reflog_ent_fn fn,
1854 void *cb_data)
1855 {
1856 struct files_ref_store *refs =
1857 files_downcast(ref_store, REF_STORE_READ,
1858 "for_each_reflog_ent_reverse");
1859 struct strbuf sb = STRBUF_INIT;
1860 FILE *logfp;
1861 long pos;
1862 int ret = 0, at_tail = 1;
1863
1864 files_reflog_path(refs, &sb, refname);
1865 logfp = fopen(sb.buf, "r");
1866 strbuf_release(&sb);
1867 if (!logfp)
1868 return -1;
1869
1870 /* Jump to the end */
1871 if (fseek(logfp, 0, SEEK_END) < 0)
1872 ret = error("cannot seek back reflog for %s: %s",
1873 refname, strerror(errno));
1874 pos = ftell(logfp);
1875 while (!ret && 0 < pos) {
1876 int cnt;
1877 size_t nread;
1878 char buf[BUFSIZ];
1879 char *endp, *scanp;
1880
1881 /* Fill next block from the end */
1882 cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
1883 if (fseek(logfp, pos - cnt, SEEK_SET)) {
1884 ret = error("cannot seek back reflog for %s: %s",
1885 refname, strerror(errno));
1886 break;
1887 }
1888 nread = fread(buf, cnt, 1, logfp);
1889 if (nread != 1) {
1890 ret = error("cannot read %d bytes from reflog for %s: %s",
1891 cnt, refname, strerror(errno));
1892 break;
1893 }
1894 pos -= cnt;
1895
1896 scanp = endp = buf + cnt;
1897 if (at_tail && scanp[-1] == '\n')
1898 /* Looking at the final LF at the end of the file */
1899 scanp--;
1900 at_tail = 0;
1901
1902 while (buf < scanp) {
1903 /*
1904 * bp will point at the terminating LF of the previous line,
1905 * or at the beginning of the buffer.
1906 */
1907 char *bp;
1908
1909 bp = find_beginning_of_line(buf, scanp);
1910
1911 if (*bp == '\n') {
1912 /*
1913 * The newline is the end of the previous line,
1914 * so we know we have a complete line starting
1915 * at (bp + 1). Prefix it onto any prior data
1916 * we collected for the line and process it.
1917 */
1918 strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
1919 scanp = bp;
1920 endp = bp + 1;
1921 ret = show_one_reflog_ent(&sb, fn, cb_data);
1922 strbuf_reset(&sb);
1923 if (ret)
1924 break;
1925 } else if (!pos) {
1926 /*
1927 * We are at the start of the buffer, and the
1928 * start of the file; there is no previous
1929 * line, and we have everything for this one.
1930 * Process it, and we can end the loop.
1931 */
1932 strbuf_splice(&sb, 0, 0, buf, endp - buf);
1933 ret = show_one_reflog_ent(&sb, fn, cb_data);
1934 strbuf_reset(&sb);
1935 break;
1936 }
1937
1938 if (bp == buf) {
1939 /*
1940 * We are at the start of the buffer, and there
1941 * is more file to read backwards. Which means
1942 * we are in the middle of a line. Note that we
1943 * may get here even if *bp was a newline; that
1944 * just means we are at the exact end of the
1945 * previous line, rather than some spot in the
1946 * middle.
1947 *
1948 * Save away what we have to be combined with
1949 * the data from the next read.
1950 */
1951 strbuf_splice(&sb, 0, 0, buf, endp - buf);
1952 break;
1953 }
1954 }
1955
1956 }
1957 if (!ret && sb.len)
1958 die("BUG: reverse reflog parser had leftover data");
1959
1960 fclose(logfp);
1961 strbuf_release(&sb);
1962 return ret;
1963 }
1964
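/*
 * Editor's note (illustration, not part of the original file): when a
 * reflog line straddles a BUFSIZ boundary, the loop above first saves
 * the tail of the line (the part found at the start of the later block)
 * with strbuf_splice(), and on the next backwards read it splices the
 * earlier part of the line in front of that tail before handing the
 * completed line to show_one_reflog_ent().
 */
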
1965 static int files_for_each_reflog_ent(struct ref_store *ref_store,
1966 const char *refname,
1967 each_reflog_ent_fn fn, void *cb_data)
1968 {
1969 struct files_ref_store *refs =
1970 files_downcast(ref_store, REF_STORE_READ,
1971 "for_each_reflog_ent");
1972 FILE *logfp;
1973 struct strbuf sb = STRBUF_INIT;
1974 int ret = 0;
1975
1976 files_reflog_path(refs, &sb, refname);
1977 logfp = fopen(sb.buf, "r");
1978 strbuf_release(&sb);
1979 if (!logfp)
1980 return -1;
1981
1982 while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
1983 ret = show_one_reflog_ent(&sb, fn, cb_data);
1984 fclose(logfp);
1985 strbuf_release(&sb);
1986 return ret;
1987 }
1988
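/*
 * Editor's sketch (not part of the original file): how a caller
 * typically reaches the two iteration functions above, via the public
 * for_each_reflog_ent()/for_each_reflog_ent_reverse() wrappers declared
 * in refs.h. The callback and helper names below are hypothetical.
 */
static int count_one_reflog_ent(struct object_id *ooid, struct object_id *noid,
				const char *email, timestamp_t timestamp,
				int tz, const char *message, void *cb_data)
{
	int *count = cb_data;

	(*count)++;
	return 0;	/* returning non-zero stops the iteration early */
}

static int count_head_reflog_ents(void)
{
	int count = 0;

	for_each_reflog_ent("HEAD", count_one_reflog_ent, &count);
	/*
	 * for_each_reflog_ent_reverse("HEAD", ...) would visit the same
	 * entries newest-first instead.
	 */
	return count;
}
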
1989 struct files_reflog_iterator {
1990 struct ref_iterator base;
1991
1992 struct ref_store *ref_store;
1993 struct dir_iterator *dir_iterator;
1994 struct object_id oid;
1995 };
1996
1997 static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1998 {
1999 struct files_reflog_iterator *iter =
2000 (struct files_reflog_iterator *)ref_iterator;
2001 struct dir_iterator *diter = iter->dir_iterator;
2002 int ok;
2003
2004 while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
2005 int flags;
2006
2007 if (!S_ISREG(diter->st.st_mode))
2008 continue;
2009 if (diter->basename[0] == '.')
2010 continue;
2011 if (ends_with(diter->basename, ".lock"))
2012 continue;
2013
2014 if (refs_read_ref_full(iter->ref_store,
2015 diter->relative_path, 0,
2016 iter->oid.hash, &flags)) {
2017 error("bad ref for %s", diter->path.buf);
2018 continue;
2019 }
2020
2021 iter->base.refname = diter->relative_path;
2022 iter->base.oid = &iter->oid;
2023 iter->base.flags = flags;
2024 return ITER_OK;
2025 }
2026
2027 iter->dir_iterator = NULL;
2028 if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
2029 ok = ITER_ERROR;
2030 return ok;
2031 }
2032
2033 static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
2034 struct object_id *peeled)
2035 {
2036 die("BUG: ref_iterator_peel() called for reflog_iterator");
2037 }
2038
2039 static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
2040 {
2041 struct files_reflog_iterator *iter =
2042 (struct files_reflog_iterator *)ref_iterator;
2043 int ok = ITER_DONE;
2044
2045 if (iter->dir_iterator)
2046 ok = dir_iterator_abort(iter->dir_iterator);
2047
2048 base_ref_iterator_free(ref_iterator);
2049 return ok;
2050 }
2051
2052 static struct ref_iterator_vtable files_reflog_iterator_vtable = {
2053 files_reflog_iterator_advance,
2054 files_reflog_iterator_peel,
2055 files_reflog_iterator_abort
2056 };
2057
2058 static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
2059 {
2060 struct files_ref_store *refs =
2061 files_downcast(ref_store, REF_STORE_READ,
2062 "reflog_iterator_begin");
2063 struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
2064 struct ref_iterator *ref_iterator = &iter->base;
2065 struct strbuf sb = STRBUF_INIT;
2066
2067 base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
2068 files_reflog_path(refs, &sb, NULL);
2069 iter->dir_iterator = dir_iterator_begin(sb.buf);
2070 iter->ref_store = ref_store;
2071 strbuf_release(&sb);
2072 return ref_iterator;
2073 }
2074
2075 /*
2076 * If update is a direct update of head_ref (the reference pointed to
2077 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
2078 */
2079 static int split_head_update(struct ref_update *update,
2080 struct ref_transaction *transaction,
2081 const char *head_ref,
2082 struct string_list *affected_refnames,
2083 struct strbuf *err)
2084 {
2085 struct string_list_item *item;
2086 struct ref_update *new_update;
2087
2088 if ((update->flags & REF_LOG_ONLY) ||
2089 (update->flags & REF_ISPRUNING) ||
2090 (update->flags & REF_UPDATE_VIA_HEAD))
2091 return 0;
2092
2093 if (strcmp(update->refname, head_ref))
2094 return 0;
2095
2096 /*
2097 * First make sure that HEAD is not already in the
2098 * transaction. This insertion is O(N) in the transaction
2099 * size, but it happens at most once per transaction.
2100 */
2101 item = string_list_insert(affected_refnames, "HEAD");
2102 if (item->util) {
2103 /* An entry already existed */
2104 strbuf_addf(err,
2105 "multiple updates for 'HEAD' (including one "
2106 "via its referent '%s') are not allowed",
2107 update->refname);
2108 return TRANSACTION_NAME_CONFLICT;
2109 }
2110
2111 new_update = ref_transaction_add_update(
2112 transaction, "HEAD",
2113 update->flags | REF_LOG_ONLY | REF_NODEREF,
2114 update->new_oid.hash, update->old_oid.hash,
2115 update->msg);
2116
2117 item->util = new_update;
2118
2119 return 0;
2120 }
2121
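/*
 * Editor's note (illustration, not part of the original file): for
 * example, if HEAD points at refs/heads/master and a transaction
 * updates refs/heads/master directly, the code above appends an extra
 * REF_LOG_ONLY | REF_NODEREF update for "HEAD", so HEAD's reflog also
 * records the change while HEAD itself is left untouched.
 */
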
2122 /*
2123 * update is for a symref that points at referent and doesn't have
2124 * REF_NODEREF set. Split it into two updates:
2125 * - The original update, but with REF_LOG_ONLY and REF_NODEREF set
2126 * - A new, separate update for the referent reference
2127 * Note that the new update will itself be subject to splitting when
2128 * the iteration gets to it.
2129 */
2130 static int split_symref_update(struct files_ref_store *refs,
2131 struct ref_update *update,
2132 const char *referent,
2133 struct ref_transaction *transaction,
2134 struct string_list *affected_refnames,
2135 struct strbuf *err)
2136 {
2137 struct string_list_item *item;
2138 struct ref_update *new_update;
2139 unsigned int new_flags;
2140
2141 /*
2142 * First make sure that referent is not already in the
2143 * transaction. This insertion is O(N) in the transaction
2144 * size, but it happens at most once per symref in a
2145 * transaction.
2146 */
2147 item = string_list_insert(affected_refnames, referent);
2148 if (item->util) {
2149 /* An entry already existed */
2150 strbuf_addf(err,
2151 "multiple updates for '%s' (including one "
2152 "via symref '%s') are not allowed",
2153 referent, update->refname);
2154 return TRANSACTION_NAME_CONFLICT;
2155 }
2156
2157 new_flags = update->flags;
2158 if (!strcmp(update->refname, "HEAD")) {
2159 /*
2160 * Record that the new update came via HEAD, so that
2161 * when we process it, split_head_update() doesn't try
2162 * to add another reflog update for HEAD. Note that
2163 * this bit will be propagated if the new_update
2164 * itself needs to be split.
2165 */
2166 new_flags |= REF_UPDATE_VIA_HEAD;
2167 }
2168
2169 new_update = ref_transaction_add_update(
2170 transaction, referent, new_flags,
2171 update->new_oid.hash, update->old_oid.hash,
2172 update->msg);
2173
2174 new_update->parent_update = update;
2175
2176 /*
2177 * Change the symbolic ref update to log only. Also, it
2178 * doesn't need to check its old SHA-1 value, as that will be
2179 * done when new_update is processed.
2180 */
2181 update->flags |= REF_LOG_ONLY | REF_NODEREF;
2182 update->flags &= ~REF_HAVE_OLD;
2183
2184 item->util = new_update;
2185
2186 return 0;
2187 }
2188
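/*
 * Editor's note (illustration, not part of the original file): e.g. an
 * update of "HEAD" (a symref to, say, refs/heads/master) without
 * REF_NODEREF is demoted here to REF_LOG_ONLY | REF_NODEREF (only
 * HEAD's reflog gets written, and its old value is no longer checked),
 * while the newly added update for refs/heads/master, marked
 * REF_UPDATE_VIA_HEAD, carries out the actual check and write.
 */
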
2189 /*
2190 * Return the refname under which update was originally requested.
2191 */
2192 static const char *original_update_refname(struct ref_update *update)
2193 {
2194 while (update->parent_update)
2195 update = update->parent_update;
2196
2197 return update->refname;
2198 }
2199
2200 /*
2201 * Check whether the REF_HAVE_OLD and old_oid values stored in update
2202 * are consistent with oid, which is the reference's current value. If
2203 * everything is OK, return 0; otherwise, write an error message to
2204 * err and return -1.
2205 */
2206 static int check_old_oid(struct ref_update *update, struct object_id *oid,
2207 struct strbuf *err)
2208 {
2209 if (!(update->flags & REF_HAVE_OLD) ||
2210 !oidcmp(oid, &update->old_oid))
2211 return 0;
2212
2213 if (is_null_oid(&update->old_oid))
2214 strbuf_addf(err, "cannot lock ref '%s': "
2215 "reference already exists",
2216 original_update_refname(update));
2217 else if (is_null_oid(oid))
2218 strbuf_addf(err, "cannot lock ref '%s': "
2219 "reference is missing but expected %s",
2220 original_update_refname(update),
2221 oid_to_hex(&update->old_oid));
2222 else
2223 strbuf_addf(err, "cannot lock ref '%s': "
2224 "is at %s but expected %s",
2225 original_update_refname(update),
2226 oid_to_hex(oid),
2227 oid_to_hex(&update->old_oid));
2228
2229 return -1;
2230 }
2231
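/*
 * Editor's note (illustration, not part of the original file): a caller
 * that sets REF_HAVE_OLD with a null old_oid is asserting that the
 * reference must not exist yet; if it nevertheless resolves to some
 * value, the "reference already exists" message above is produced.
 */
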
2232 /*
2233 * Prepare for carrying out update:
2234 * - Lock the reference referred to by update.
2235 * - Read the reference under lock.
2236 * - Check that its old SHA-1 value (if specified) is correct, and in
2237 * any case record it in update->lock->old_oid for later use when
2238 * writing the reflog.
2239 * - If it is a symref update without REF_NODEREF, split it up into a
2240 * REF_LOG_ONLY update of the symref and add a separate update for
2241 * the referent to transaction.
2242 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2243 * update of HEAD.
2244 */
2245 static int lock_ref_for_update(struct files_ref_store *refs,
2246 struct ref_update *update,
2247 struct ref_transaction *transaction,
2248 const char *head_ref,
2249 struct string_list *affected_refnames,
2250 struct strbuf *err)
2251 {
2252 struct strbuf referent = STRBUF_INIT;
2253 int mustexist = (update->flags & REF_HAVE_OLD) &&
2254 !is_null_oid(&update->old_oid);
2255 int ret;
2256 struct ref_lock *lock;
2257
2258 files_assert_main_repository(refs, "lock_ref_for_update");
2259
2260 if ((update->flags & REF_HAVE_NEW) && is_null_oid(&update->new_oid))
2261 update->flags |= REF_DELETING;
2262
2263 if (head_ref) {
2264 ret = split_head_update(update, transaction, head_ref,
2265 affected_refnames, err);
2266 if (ret)
2267 return ret;
2268 }
2269
2270 ret = lock_raw_ref(refs, update->refname, mustexist,
2271 affected_refnames, NULL,
2272 &lock, &referent,
2273 &update->type, err);
2274 if (ret) {
2275 char *reason;
2276
2277 reason = strbuf_detach(err, NULL);
2278 strbuf_addf(err, "cannot lock ref '%s': %s",
2279 original_update_refname(update), reason);
2280 free(reason);
2281 return ret;
2282 }
2283
2284 update->backend_data = lock;
2285
2286 if (update->type & REF_ISSYMREF) {
2287 if (update->flags & REF_NODEREF) {
2288 /*
2289 * We won't be reading the referent as part of
2290 * the transaction, so we have to read it here
2291 * to record and possibly check old_oid:
2292 */
2293 if (refs_read_ref_full(&refs->base,
2294 referent.buf, 0,
2295 lock->old_oid.hash, NULL)) {
2296 if (update->flags & REF_HAVE_OLD) {
2297 strbuf_addf(err, "cannot lock ref '%s': "
2298 "error reading reference",
2299 original_update_refname(update));
2300 return -1;
2301 }
2302 } else if (check_old_oid(update, &lock->old_oid, err)) {
2303 return TRANSACTION_GENERIC_ERROR;
2304 }
2305 } else {
2306 /*
2307 * Create a new update for the reference this
2308 * symref is pointing at. Also, we will record
2309 * and verify old_oid for this update as part
2310 * of processing the split-off update, so we
2311 * don't have to do it here.
2312 */
2313 ret = split_symref_update(refs, update,
2314 referent.buf, transaction,
2315 affected_refnames, err);
2316 if (ret)
2317 return ret;
2318 }
2319 } else {
2320 struct ref_update *parent_update;
2321
2322 if (check_old_oid(update, &lock->old_oid, err))
2323 return TRANSACTION_GENERIC_ERROR;
2324
2325 /*
2326 * If this update is happening indirectly because of a
2327 * symref update, record the old SHA-1 in the parent
2328 * update:
2329 */
2330 for (parent_update = update->parent_update;
2331 parent_update;
2332 parent_update = parent_update->parent_update) {
2333 struct ref_lock *parent_lock = parent_update->backend_data;
2334 oidcpy(&parent_lock->old_oid, &lock->old_oid);
2335 }
2336 }
2337
2338 if ((update->flags & REF_HAVE_NEW) &&
2339 !(update->flags & REF_DELETING) &&
2340 !(update->flags & REF_LOG_ONLY)) {
2341 if (!(update->type & REF_ISSYMREF) &&
2342 !oidcmp(&lock->old_oid, &update->new_oid)) {
2343 /*
2344 * The reference already has the desired
2345 * value, so we don't need to write it.
2346 */
2347 } else if (write_ref_to_lockfile(lock, &update->new_oid,
2348 err)) {
2349 char *write_err = strbuf_detach(err, NULL);
2350
2351 /*
2352 * The lock was freed upon failure of
2353 * write_ref_to_lockfile():
2354 */
2355 update->backend_data = NULL;
2356 strbuf_addf(err,
2357 "cannot update ref '%s': %s",
2358 update->refname, write_err);
2359 free(write_err);
2360 return TRANSACTION_GENERIC_ERROR;
2361 } else {
2362 update->flags |= REF_NEEDS_COMMIT;
2363 }
2364 }
2365 if (!(update->flags & REF_NEEDS_COMMIT)) {
2366 /*
2367 * We didn't call write_ref_to_lockfile(), so
2368 * the lockfile is still open. Close it to
2369 * free up the file descriptor:
2370 */
2371 if (close_ref(lock)) {
2372 strbuf_addf(err, "couldn't close '%s.lock'",
2373 update->refname);
2374 return TRANSACTION_GENERIC_ERROR;
2375 }
2376 }
2377 return 0;
2378 }
2379
2380 /*
2381 * Unlock any references in `transaction` that are still locked, and
2382 * mark the transaction closed.
2383 */
2384 static void files_transaction_cleanup(struct ref_transaction *transaction)
2385 {
2386 size_t i;
2387
2388 for (i = 0; i < transaction->nr; i++) {
2389 struct ref_update *update = transaction->updates[i];
2390 struct ref_lock *lock = update->backend_data;
2391
2392 if (lock) {
2393 unlock_ref(lock);
2394 update->backend_data = NULL;
2395 }
2396 }
2397
2398 transaction->state = REF_TRANSACTION_CLOSED;
2399 }
2400
2401 static int files_transaction_prepare(struct ref_store *ref_store,
2402 struct ref_transaction *transaction,
2403 struct strbuf *err)
2404 {
2405 struct files_ref_store *refs =
2406 files_downcast(ref_store, REF_STORE_WRITE,
2407 "ref_transaction_prepare");
2408 size_t i;
2409 int ret = 0;
2410 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2411 char *head_ref = NULL;
2412 int head_type;
2413 struct object_id head_oid;
2414
2415 assert(err);
2416
2417 if (!transaction->nr)
2418 goto cleanup;
2419
2420 /*
2421 * Fail if a refname appears more than once in the
2422 * transaction. (If we end up splitting up any updates using
2423 * split_symref_update() or split_head_update(), those
2424 * functions will check that the new updates don't have the
2425 * same refname as any existing ones.)
2426 */
2427 for (i = 0; i < transaction->nr; i++) {
2428 struct ref_update *update = transaction->updates[i];
2429 struct string_list_item *item =
2430 string_list_append(&affected_refnames, update->refname);
2431
2432 /*
2433 * We store a pointer to update in item->util, but at
2434 * the moment we never use the value of this field
2435 * except to check whether it is non-NULL.
2436 */
2437 item->util = update;
2438 }
2439 string_list_sort(&affected_refnames);
2440 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2441 ret = TRANSACTION_GENERIC_ERROR;
2442 goto cleanup;
2443 }
2444
2445 /*
2446 * Special hack: If a branch is updated directly and HEAD
2447 * points to it (as may happen on the remote side of a push,
2448 * for example), then logically the HEAD reflog should be
2449 * updated too.
2450 *
2451 * A generic solution would require reverse symref lookups,
2452 * but finding all symrefs pointing to a given branch would be
2453 * too costly for this rare event (the direct update of a
2454 * branch) to be worth it. So let's cheat and check with HEAD
2455 * only, which should cover 99% of all usage scenarios (even
2456 * 100% of the default ones).
2457 *
2458 * So if HEAD is a symbolic reference, then record the name of
2459 * the reference that it points to. If we see an update of
2460 * head_ref within the transaction, then split_head_update()
2461 * arranges for the reflog of HEAD to be updated, too.
2462 */
2463 head_ref = refs_resolve_refdup(ref_store, "HEAD",
2464 RESOLVE_REF_NO_RECURSE,
2465 head_oid.hash, &head_type);
2466
2467 if (head_ref && !(head_type & REF_ISSYMREF)) {
2468 FREE_AND_NULL(head_ref);
2469 }
2470
2471 /*
2472 * Acquire all locks, verify old values if provided, check
2473 * that new values are valid, and write new values to the
2474 * lockfiles, ready to be activated. Only keep one lockfile
2475 * open at a time to avoid running out of file descriptors.
2476 * Note that lock_ref_for_update() might append more updates
2477 * to the transaction.
2478 */
2479 for (i = 0; i < transaction->nr; i++) {
2480 struct ref_update *update = transaction->updates[i];
2481
2482 ret = lock_ref_for_update(refs, update, transaction,
2483 head_ref, &affected_refnames, err);
2484 if (ret)
2485 break;
2486 }
2487
2488 cleanup:
2489 free(head_ref);
2490 string_list_clear(&affected_refnames, 0);
2491
2492 if (ret)
2493 files_transaction_cleanup(transaction);
2494 else
2495 transaction->state = REF_TRANSACTION_PREPARED;
2496
2497 return ret;
2498 }
2499
2500 static int files_transaction_finish(struct ref_store *ref_store,
2501 struct ref_transaction *transaction,
2502 struct strbuf *err)
2503 {
2504 struct files_ref_store *refs =
2505 files_downcast(ref_store, 0, "ref_transaction_finish");
2506 size_t i;
2507 int ret = 0;
2508 struct string_list refs_to_delete = STRING_LIST_INIT_NODUP;
2509 struct string_list_item *ref_to_delete;
2510 struct strbuf sb = STRBUF_INIT;
2511
2512 assert(err);
2513
2514 if (!transaction->nr) {
2515 transaction->state = REF_TRANSACTION_CLOSED;
2516 return 0;
2517 }
2518
2519 /* Perform updates first so live commits remain referenced */
2520 for (i = 0; i < transaction->nr; i++) {
2521 struct ref_update *update = transaction->updates[i];
2522 struct ref_lock *lock = update->backend_data;
2523
2524 if (update->flags & REF_NEEDS_COMMIT ||
2525 update->flags & REF_LOG_ONLY) {
2526 if (files_log_ref_write(refs,
2527 lock->ref_name,
2528 &lock->old_oid,
2529 &update->new_oid,
2530 update->msg, update->flags,
2531 err)) {
2532 char *old_msg = strbuf_detach(err, NULL);
2533
2534 strbuf_addf(err, "cannot update the ref '%s': %s",
2535 lock->ref_name, old_msg);
2536 free(old_msg);
2537 unlock_ref(lock);
2538 update->backend_data = NULL;
2539 ret = TRANSACTION_GENERIC_ERROR;
2540 goto cleanup;
2541 }
2542 }
2543 if (update->flags & REF_NEEDS_COMMIT) {
2544 clear_loose_ref_cache(refs);
2545 if (commit_ref(lock)) {
2546 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
2547 unlock_ref(lock);
2548 update->backend_data = NULL;
2549 ret = TRANSACTION_GENERIC_ERROR;
2550 goto cleanup;
2551 }
2552 }
2553 }
2554 /* Perform deletes now that updates are safely completed */
2555 for (i = 0; i < transaction->nr; i++) {
2556 struct ref_update *update = transaction->updates[i];
2557 struct ref_lock *lock = update->backend_data;
2558
2559 if (update->flags & REF_DELETING &&
2560 !(update->flags & REF_LOG_ONLY)) {
2561 if (!(update->type & REF_ISPACKED) ||
2562 update->type & REF_ISSYMREF) {
2563 /* It is a loose reference. */
2564 strbuf_reset(&sb);
2565 files_ref_path(refs, &sb, lock->ref_name);
2566 if (unlink_or_msg(sb.buf, err)) {
2567 ret = TRANSACTION_GENERIC_ERROR;
2568 goto cleanup;
2569 }
2570 update->flags |= REF_DELETED_LOOSE;
2571 }
2572
2573 if (!(update->flags & REF_ISPRUNING))
2574 string_list_append(&refs_to_delete,
2575 lock->ref_name);
2576 }
2577 }
2578
2579 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2580 ret = TRANSACTION_GENERIC_ERROR;
2581 goto cleanup;
2582 }
2583
2584 if (repack_without_refs(refs->packed_ref_store, &refs_to_delete, err)) {
2585 ret = TRANSACTION_GENERIC_ERROR;
2586 packed_refs_unlock(refs->packed_ref_store);
2587 goto cleanup;
2588 }
2589
2590 packed_refs_unlock(refs->packed_ref_store);
2591
2592 /* Delete the reflogs of any references that were deleted: */
2593 for_each_string_list_item(ref_to_delete, &refs_to_delete) {
2594 strbuf_reset(&sb);
2595 files_reflog_path(refs, &sb, ref_to_delete->string);
2596 if (!unlink_or_warn(sb.buf))
2597 try_remove_empty_parents(refs, ref_to_delete->string,
2598 REMOVE_EMPTY_PARENTS_REFLOG);
2599 }
2600
2601 clear_loose_ref_cache(refs);
2602
2603 cleanup:
2604 files_transaction_cleanup(transaction);
2605
2606 for (i = 0; i < transaction->nr; i++) {
2607 struct ref_update *update = transaction->updates[i];
2608
2609 if (update->flags & REF_DELETED_LOOSE) {
2610 /*
2611 * The loose reference was deleted. Delete any
2612 * empty parent directories. (Note that this
2613 * can only work because we have already
2614 * removed the lockfile.)
2615 */
2616 try_remove_empty_parents(refs, update->refname,
2617 REMOVE_EMPTY_PARENTS_REF);
2618 }
2619 }
2620
2621 strbuf_release(&sb);
2622 string_list_clear(&refs_to_delete, 0);
2623 return ret;
2624 }
2625
2626 static int files_transaction_abort(struct ref_store *ref_store,
2627 struct ref_transaction *transaction,
2628 struct strbuf *err)
2629 {
2630 files_transaction_cleanup(transaction);
2631 return 0;
2632 }
2633
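/*
 * Editor's sketch (not part of the original file): the prepare/finish/
 * abort callbacks above are normally driven through the generic
 * transaction API declared in refs.h, where ref_transaction_commit()
 * performs the prepare and finish steps. The function, refname and
 * message below are hypothetical.
 */
static int example_update_ref(const unsigned char *new_sha1,
			      const unsigned char *old_sha1)
{
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *tr = ref_transaction_begin(&err);
	int ret = -1;

	if (tr &&
	    !ref_transaction_update(tr, "refs/heads/example",
				    new_sha1, old_sha1, 0,
				    "example: update ref", &err) &&
	    !ref_transaction_commit(tr, &err))
		ret = 0;
	else
		error("%s", err.buf);

	ref_transaction_free(tr);
	strbuf_release(&err);
	return ret;
}
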
2634 static int ref_present(const char *refname,
2635 const struct object_id *oid, int flags, void *cb_data)
2636 {
2637 struct string_list *affected_refnames = cb_data;
2638
2639 return string_list_has_string(affected_refnames, refname);
2640 }
2641
2642 static int files_initial_transaction_commit(struct ref_store *ref_store,
2643 struct ref_transaction *transaction,
2644 struct strbuf *err)
2645 {
2646 struct files_ref_store *refs =
2647 files_downcast(ref_store, REF_STORE_WRITE,
2648 "initial_ref_transaction_commit");
2649 size_t i;
2650 int ret = 0;
2651 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2652
2653 assert(err);
2654
2655 if (transaction->state != REF_TRANSACTION_OPEN)
2656 die("BUG: commit called for transaction that is not open");
2657
2658 /* Fail if a refname appears more than once in the transaction: */
2659 for (i = 0; i < transaction->nr; i++)
2660 string_list_append(&affected_refnames,
2661 transaction->updates[i]->refname);
2662 string_list_sort(&affected_refnames);
2663 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2664 ret = TRANSACTION_GENERIC_ERROR;
2665 goto cleanup;
2666 }
2667
2668 /*
2669 * It's really undefined to call this function in an active
2670 * repository or when there are existing references: we are
2671 * only locking and changing packed-refs, so (1) any
2672 * simultaneous processes might try to change a reference at
2673 * the same time we do, and (2) any existing loose versions of
2674 * the references that we are setting would have precedence
2675 * over our values. But some remote helpers create the remote
2676 * "HEAD" and "master" branches before calling this function,
2677 * so here we really only check that none of the references
2678 * that we are creating already exists.
2679 */
2680 if (refs_for_each_rawref(&refs->base, ref_present,
2681 &affected_refnames))
2682 die("BUG: initial ref transaction called with existing refs");
2683
2684 for (i = 0; i < transaction->nr; i++) {
2685 struct ref_update *update = transaction->updates[i];
2686
2687 if ((update->flags & REF_HAVE_OLD) &&
2688 !is_null_oid(&update->old_oid))
2689 die("BUG: initial ref transaction with old_sha1 set");
2690 if (refs_verify_refname_available(&refs->base, update->refname,
2691 &affected_refnames, NULL,
2692 err)) {
2693 ret = TRANSACTION_NAME_CONFLICT;
2694 goto cleanup;
2695 }
2696 }
2697
2698 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2699 ret = TRANSACTION_GENERIC_ERROR;
2700 goto cleanup;
2701 }
2702
2703 for (i = 0; i < transaction->nr; i++) {
2704 struct ref_update *update = transaction->updates[i];
2705
2706 if ((update->flags & REF_HAVE_NEW) &&
2707 !is_null_oid(&update->new_oid))
2708 add_packed_ref(refs->packed_ref_store, update->refname,
2709 &update->new_oid);
2710 }
2711
2712 if (commit_packed_refs(refs->packed_ref_store, err)) {
2713 ret = TRANSACTION_GENERIC_ERROR;
2714 goto cleanup;
2715 }
2716
2717 cleanup:
2718 packed_refs_unlock(refs->packed_ref_store);
2719 transaction->state = REF_TRANSACTION_CLOSED;
2720 string_list_clear(&affected_refnames, 0);
2721 return ret;
2722 }
2723
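/*
 * Editor's note (illustration, not part of the original file): this
 * path is reached through initial_ref_transaction_commit() in refs.c;
 * "git clone" uses it to write all freshly fetched refs straight into
 * packed-refs in one go rather than creating a loose ref file per
 * reference.
 */
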
2724 struct expire_reflog_cb {
2725 unsigned int flags;
2726 reflog_expiry_should_prune_fn *should_prune_fn;
2727 void *policy_cb;
2728 FILE *newlog;
2729 struct object_id last_kept_oid;
2730 };
2731
2732 static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
2733 const char *email, timestamp_t timestamp, int tz,
2734 const char *message, void *cb_data)
2735 {
2736 struct expire_reflog_cb *cb = cb_data;
2737 struct expire_reflog_policy_cb *policy_cb = cb->policy_cb;
2738
2739 if (cb->flags & EXPIRE_REFLOGS_REWRITE)
2740 ooid = &cb->last_kept_oid;
2741
2742 if ((*cb->should_prune_fn)(ooid, noid, email, timestamp, tz,
2743 message, policy_cb)) {
2744 if (!cb->newlog)
2745 printf("would prune %s", message);
2746 else if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
2747 printf("prune %s", message);
2748 } else {
2749 if (cb->newlog) {
2750 fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s",
2751 oid_to_hex(ooid), oid_to_hex(noid),
2752 email, timestamp, tz, message);
2753 oidcpy(&cb->last_kept_oid, noid);
2754 }
2755 if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
2756 printf("keep %s", message);
2757 }
2758 return 0;
2759 }
2760
2761 static int files_reflog_expire(struct ref_store *ref_store,
2762 const char *refname, const unsigned char *sha1,
2763 unsigned int flags,
2764 reflog_expiry_prepare_fn prepare_fn,
2765 reflog_expiry_should_prune_fn should_prune_fn,
2766 reflog_expiry_cleanup_fn cleanup_fn,
2767 void *policy_cb_data)
2768 {
2769 struct files_ref_store *refs =
2770 files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2771 static struct lock_file reflog_lock;
2772 struct expire_reflog_cb cb;
2773 struct ref_lock *lock;
2774 struct strbuf log_file_sb = STRBUF_INIT;
2775 char *log_file;
2776 int status = 0;
2777 int type;
2778 struct strbuf err = STRBUF_INIT;
2779 struct object_id oid;
2780
2781 memset(&cb, 0, sizeof(cb));
2782 cb.flags = flags;
2783 cb.policy_cb = policy_cb_data;
2784 cb.should_prune_fn = should_prune_fn;
2785
2786 /*
2787 * The reflog file is locked by holding the lock on the
2788 * reference itself, plus we might need to update the
2789 * reference if --updateref was specified:
2790 */
2791 lock = lock_ref_sha1_basic(refs, refname, sha1,
2792 NULL, NULL, REF_NODEREF,
2793 &type, &err);
2794 if (!lock) {
2795 error("cannot lock ref '%s': %s", refname, err.buf);
2796 strbuf_release(&err);
2797 return -1;
2798 }
2799 if (!refs_reflog_exists(ref_store, refname)) {
2800 unlock_ref(lock);
2801 return 0;
2802 }
2803
2804 files_reflog_path(refs, &log_file_sb, refname);
2805 log_file = strbuf_detach(&log_file_sb, NULL);
2806 if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
2807 /*
2808 * Even though holding $GIT_DIR/logs/$reflog.lock has
2809 * no locking implications, we use the lock_file
2810 * machinery here anyway because it does a lot of the
2811 * work we need, including cleaning up if the program
2812 * exits unexpectedly.
2813 */
2814 if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) {
2815 struct strbuf err = STRBUF_INIT;
2816 unable_to_lock_message(log_file, errno, &err);
2817 error("%s", err.buf);
2818 strbuf_release(&err);
2819 goto failure;
2820 }
2821 cb.newlog = fdopen_lock_file(&reflog_lock, "w");
2822 if (!cb.newlog) {
2823 error("cannot fdopen %s (%s)",
2824 get_lock_file_path(&reflog_lock), strerror(errno));
2825 goto failure;
2826 }
2827 }
2828
2829 hashcpy(oid.hash, sha1);
2830
2831 (*prepare_fn)(refname, &oid, cb.policy_cb);
2832 refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
2833 (*cleanup_fn)(cb.policy_cb);
2834
2835 if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
2836 /*
2837 * It doesn't make sense to adjust a reference pointed
2838 * to by a symbolic ref based on expiring entries in
2839 * the symbolic reference's reflog. Nor can we update
2840 * a reference if there are no remaining reflog
2841 * entries.
2842 */
2843 int update = (flags & EXPIRE_REFLOGS_UPDATE_REF) &&
2844 !(type & REF_ISSYMREF) &&
2845 !is_null_oid(&cb.last_kept_oid);
2846
2847 if (close_lock_file(&reflog_lock)) {
2848 status |= error("couldn't write %s: %s", log_file,
2849 strerror(errno));
2850 } else if (update &&
2851 (write_in_full(get_lock_file_fd(lock->lk),
2852 oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
2853 write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 ||
2854 close_ref(lock) < 0)) {
2855 status |= error("couldn't write %s",
2856 get_lock_file_path(lock->lk));
2857 rollback_lock_file(&reflog_lock);
2858 } else if (commit_lock_file(&reflog_lock)) {
2859 status |= error("unable to write reflog '%s' (%s)",
2860 log_file, strerror(errno));
2861 } else if (update && commit_ref(lock)) {
2862 status |= error("couldn't set %s", lock->ref_name);
2863 }
2864 }
2865 free(log_file);
2866 unlock_ref(lock);
2867 return status;
2868
2869 failure:
2870 rollback_lock_file(&reflog_lock);
2871 free(log_file);
2872 unlock_ref(lock);
2873 return -1;
2874 }
2875
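/*
 * Editor's sketch (not part of the original file): the three policy
 * callbacks are supplied by the caller (normally builtin/reflog.c) and
 * follow the reflog_expiry_*_fn typedefs in refs.h. A simplified policy
 * that prunes entries older than a fixed cutoff might look like this;
 * all names and the cutoff value are hypothetical.
 */
struct example_expire_policy {
	timestamp_t cutoff;
};

static void example_expiry_prepare(const char *refname,
				   const struct object_id *oid,
				   void *cb_data)
{
	/* nothing to precompute for this simple policy */
}

static int example_should_prune(struct object_id *ooid, struct object_id *noid,
				const char *email, timestamp_t timestamp,
				int tz, const char *message, void *cb_data)
{
	struct example_expire_policy *policy = cb_data;

	return timestamp < policy->cutoff;	/* prune entries older than the cutoff */
}

static void example_expiry_cleanup(void *cb_data)
{
	/* nothing to release */
}

static int example_expire_head_reflog(const unsigned char *head_sha1)
{
	struct example_expire_policy policy;

	policy.cutoff = 0;	/* 0 keeps everything; a real caller derives this from config */

	return reflog_expire("HEAD", head_sha1,
			     EXPIRE_REFLOGS_DRY_RUN | EXPIRE_REFLOGS_VERBOSE,
			     example_expiry_prepare,
			     example_should_prune,
			     example_expiry_cleanup,
			     &policy);
}
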
2876 static int files_init_db(struct ref_store *ref_store, struct strbuf *err)
2877 {
2878 struct files_ref_store *refs =
2879 files_downcast(ref_store, REF_STORE_WRITE, "init_db");
2880 struct strbuf sb = STRBUF_INIT;
2881
2882 /*
2883 * Create .git/refs/{heads,tags}
2884 */
2885 files_ref_path(refs, &sb, "refs/heads");
2886 safe_create_dir(sb.buf, 1);
2887
2888 strbuf_reset(&sb);
2889 files_ref_path(refs, &sb, "refs/tags");
2890 safe_create_dir(sb.buf, 1);
2891
2892 strbuf_release(&sb);
2893 return 0;
2894 }
2895
2896 struct ref_storage_be refs_be_files = {
2897 NULL,
2898 "files",
2899 files_ref_store_create,
2900 files_init_db,
2901 files_transaction_prepare,
2902 files_transaction_finish,
2903 files_transaction_abort,
2904 files_initial_transaction_commit,
2905
2906 files_pack_refs,
2907 files_peel_ref,
2908 files_create_symref,
2909 files_delete_refs,
2910 files_rename_ref,
2911
2912 files_ref_iterator_begin,
2913 files_read_raw_ref,
2914
2915 files_reflog_iterator_begin,
2916 files_for_each_reflog_ent,
2917 files_for_each_reflog_ent_reverse,
2918 files_reflog_exists,
2919 files_create_reflog,
2920 files_delete_reflog,
2921 files_reflog_expire
2922 };