/* refs/files-backend.c */
1#include "../cache.h"
2#include "../config.h"
3#include "../refs.h"
4#include "refs-internal.h"
5#include "ref-cache.h"
6#include "packed-backend.h"
7#include "../iterator.h"
8#include "../dir-iterator.h"
9#include "../lockfile.h"
10#include "../object.h"
11#include "../dir.h"
12
13struct ref_lock {
14 char *ref_name;
15 struct lock_file *lk;
16 struct object_id old_oid;
17};
18
19/*
20 * Future: need to be in "struct repository"
21 * when doing a full libification.
22 */
23struct files_ref_store {
24 struct ref_store base;
25 unsigned int store_flags;
26
27 char *gitdir;
28 char *gitcommondir;
29
30 struct ref_cache *loose;
31
32 struct ref_store *packed_ref_store;
33};
34
35static void clear_loose_ref_cache(struct files_ref_store *refs)
36{
37 if (refs->loose) {
38 free_ref_cache(refs->loose);
39 refs->loose = NULL;
40 }
41}
42
43/*
44 * Create a files-backend ref_store for the repository whose git
45 * directory is gitdir, together with its packed-refs backing store.
46 */
47static struct ref_store *files_ref_store_create(const char *gitdir,
48 unsigned int flags)
49{
50 struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
51 struct ref_store *ref_store = (struct ref_store *)refs;
52 struct strbuf sb = STRBUF_INIT;
53
54 base_ref_store_init(ref_store, &refs_be_files);
55 refs->store_flags = flags;
56
57 refs->gitdir = xstrdup(gitdir);
58 get_common_dir_noenv(&sb, gitdir);
59 refs->gitcommondir = strbuf_detach(&sb, NULL);
60 strbuf_addf(&sb, "%s/packed-refs", refs->gitcommondir);
61 refs->packed_ref_store = packed_ref_store_create(sb.buf, flags);
62 strbuf_release(&sb);
63
64 return ref_store;
65}
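/*
 * For a plain (non-worktree) repository, gitdir and gitcommondir both
 * end up as "$GIT_DIR", so the backing file is "$GIT_DIR/packed-refs";
 * in a linked worktree the packed-refs file is shared through
 * "$GIT_COMMON_DIR/packed-refs".
 */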
66
67/*
68 * Die if refs is not the main ref store. caller is used in any
69 * necessary error messages.
70 */
71static void files_assert_main_repository(struct files_ref_store *refs,
72 const char *caller)
73{
74 if (refs->store_flags & REF_STORE_MAIN)
75 return;
76
77 die("BUG: operation %s only allowed for main ref store", caller);
78}
79
80/*
81 * Downcast ref_store to files_ref_store. Die if ref_store is not a
82 * files_ref_store. required_flags is compared with ref_store's
83 * store_flags to ensure the ref_store has all required capabilities.
84 * "caller" is used in any necessary error messages.
85 */
86static struct files_ref_store *files_downcast(struct ref_store *ref_store,
87 unsigned int required_flags,
88 const char *caller)
89{
90 struct files_ref_store *refs;
91
92 if (ref_store->be != &refs_be_files)
93 die("BUG: ref_store is type \"%s\" not \"files\" in %s",
94 ref_store->be->name, caller);
95
96 refs = (struct files_ref_store *)ref_store;
97
98 if ((refs->store_flags & required_flags) != required_flags)
99 die("BUG: operation %s requires abilities 0x%x, but only have 0x%x",
100 caller, required_flags, refs->store_flags);
101
102 return refs;
103}
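/*
 * For example, a hypothetical write operation would downcast with the
 * capabilities it needs before touching the loose refs:
 *
 *	struct files_ref_store *refs =
 *		files_downcast(ref_store, REF_STORE_READ | REF_STORE_WRITE,
 *			       "frotz_refs");
 *
 * ("frotz_refs" is only a placeholder caller name for this sketch.)
 */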
104
105static void files_reflog_path(struct files_ref_store *refs,
106 struct strbuf *sb,
107 const char *refname)
108{
109 if (!refname) {
110 /*
111 * FIXME: of course this is wrong in multi worktree
112 * setting. To be fixed real soon.
113 */
114 strbuf_addf(sb, "%s/logs", refs->gitcommondir);
115 return;
116 }
117
118 switch (ref_type(refname)) {
119 case REF_TYPE_PER_WORKTREE:
120 case REF_TYPE_PSEUDOREF:
121 strbuf_addf(sb, "%s/logs/%s", refs->gitdir, refname);
122 break;
123 case REF_TYPE_NORMAL:
124 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname);
125 break;
126 default:
127 die("BUG: unknown ref type %d of ref %s",
128 ref_type(refname), refname);
129 }
130}
131
132static void files_ref_path(struct files_ref_store *refs,
133 struct strbuf *sb,
134 const char *refname)
135{
136 switch (ref_type(refname)) {
137 case REF_TYPE_PER_WORKTREE:
138 case REF_TYPE_PSEUDOREF:
139 strbuf_addf(sb, "%s/%s", refs->gitdir, refname);
140 break;
141 case REF_TYPE_NORMAL:
142 strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname);
143 break;
144 default:
145 die("BUG: unknown ref type %d of ref %s",
146 ref_type(refname), refname);
147 }
148}
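/*
 * Example of the mapping above in a linked worktree: a normal ref such
 * as "refs/heads/master" lives at "$GIT_COMMON_DIR/refs/heads/master"
 * and is shared by all worktrees, while per-worktree refs ("HEAD",
 * "refs/bisect/...") and pseudorefs ("MERGE_HEAD", ...) live at
 * "$GIT_DIR/<refname>", so each worktree has its own copy.
 */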
149
150/*
151 * Read the loose references from the namespace dirname into dir
152 * (without recursing). dirname must end with '/'. dir must be the
153 * directory entry corresponding to dirname.
154 */
155static void loose_fill_ref_dir(struct ref_store *ref_store,
156 struct ref_dir *dir, const char *dirname)
157{
158 struct files_ref_store *refs =
159 files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir");
160 DIR *d;
161 struct dirent *de;
162 int dirnamelen = strlen(dirname);
163 struct strbuf refname;
164 struct strbuf path = STRBUF_INIT;
165 size_t path_baselen;
166
167 files_ref_path(refs, &path, dirname);
168 path_baselen = path.len;
169
170 d = opendir(path.buf);
171 if (!d) {
172 strbuf_release(&path);
173 return;
174 }
175
176 strbuf_init(&refname, dirnamelen + 257);
177 strbuf_add(&refname, dirname, dirnamelen);
178
179 while ((de = readdir(d)) != NULL) {
180 struct object_id oid;
181 struct stat st;
182 int flag;
183
184 if (de->d_name[0] == '.')
185 continue;
186 if (ends_with(de->d_name, ".lock"))
187 continue;
188 strbuf_addstr(&refname, de->d_name);
189 strbuf_addstr(&path, de->d_name);
190 if (stat(path.buf, &st) < 0) {
191 ; /* silently ignore */
192 } else if (S_ISDIR(st.st_mode)) {
193 strbuf_addch(&refname, '/');
194 add_entry_to_dir(dir,
195 create_dir_entry(dir->cache, refname.buf,
196 refname.len, 1));
197 } else {
198 if (!refs_resolve_ref_unsafe(&refs->base,
199 refname.buf,
200 RESOLVE_REF_READING,
201 oid.hash, &flag)) {
202 oidclr(&oid);
203 flag |= REF_ISBROKEN;
204 } else if (is_null_oid(&oid)) {
205 /*
206 * It is so astronomically unlikely
207 * that NULL_SHA1 is the SHA-1 of an
208 * actual object that we consider its
209 * appearance in a loose reference
210 * file to be repo corruption
211 * (probably due to a software bug).
212 */
213 flag |= REF_ISBROKEN;
214 }
215
216 if (check_refname_format(refname.buf,
217 REFNAME_ALLOW_ONELEVEL)) {
218 if (!refname_is_safe(refname.buf))
219 die("loose refname is dangerous: %s", refname.buf);
220 oidclr(&oid);
221 flag |= REF_BAD_NAME | REF_ISBROKEN;
222 }
223 add_entry_to_dir(dir,
224 create_ref_entry(refname.buf, &oid, flag));
225 }
226 strbuf_setlen(&refname, dirnamelen);
227 strbuf_setlen(&path, path_baselen);
228 }
229 strbuf_release(&refname);
230 strbuf_release(&path);
231 closedir(d);
232
233 /*
234 * Manually add refs/bisect, which, being per-worktree, might
235 * not appear in the directory listing for refs/ in the main
236 * repo.
237 */
238 if (!strcmp(dirname, "refs/")) {
239 int pos = search_ref_dir(dir, "refs/bisect/", 12);
240
241 if (pos < 0) {
242 struct ref_entry *child_entry = create_dir_entry(
243 dir->cache, "refs/bisect/", 12, 1);
244 add_entry_to_dir(dir, child_entry);
245 }
246 }
247}
248
249static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
250{
251 if (!refs->loose) {
252 /*
253 * Mark the top-level directory complete because we
254 * are about to read the only subdirectory that can
255 * hold references:
256 */
257 refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir);
258
259 /* We're going to fill the top level ourselves: */
260 refs->loose->root->flag &= ~REF_INCOMPLETE;
261
262 /*
263 * Add an incomplete entry for "refs/" (to be filled
264 * lazily):
265 */
266 add_entry_to_dir(get_ref_dir(refs->loose->root),
267 create_dir_entry(refs->loose, "refs/", 5, 1));
268 }
269 return refs->loose;
270}
271
272static int files_read_raw_ref(struct ref_store *ref_store,
273 const char *refname, unsigned char *sha1,
274 struct strbuf *referent, unsigned int *type)
275{
276 struct files_ref_store *refs =
277 files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
278 struct strbuf sb_contents = STRBUF_INIT;
279 struct strbuf sb_path = STRBUF_INIT;
280 const char *path;
281 const char *buf;
282 struct stat st;
283 int fd;
284 int ret = -1;
285 int save_errno;
286 int remaining_retries = 3;
287
288 *type = 0;
289 strbuf_reset(&sb_path);
290
291 files_ref_path(refs, &sb_path, refname);
292
293 path = sb_path.buf;
294
295stat_ref:
296 /*
297 * We might have to loop back here to avoid a race
298 * condition: first we lstat() the file, then we try
299 * to read it as a link or as a file. But if somebody
300 * changes the type of the file (file <-> directory
301 * <-> symlink) between the lstat() and reading, then
302 * we don't want to report that as an error but rather
303 * try again starting with the lstat().
304 *
305 * We'll keep a count of the retries, though, just to avoid
306 * any confusing situation sending us into an infinite loop.
307 */
308
309 if (remaining_retries-- <= 0)
310 goto out;
311
312 if (lstat(path, &st) < 0) {
313 if (errno != ENOENT)
314 goto out;
315 if (refs_read_raw_ref(refs->packed_ref_store, refname,
316 sha1, referent, type)) {
317 errno = ENOENT;
318 goto out;
319 }
320 ret = 0;
321 goto out;
322 }
323
324 /* Follow "normalized" symlinks (i.e. those whose target looks like "refs/...") by hand */
325 if (S_ISLNK(st.st_mode)) {
326 strbuf_reset(&sb_contents);
327 if (strbuf_readlink(&sb_contents, path, 0) < 0) {
328 if (errno == ENOENT || errno == EINVAL)
329 /* inconsistent with lstat; retry */
330 goto stat_ref;
331 else
332 goto out;
333 }
334 if (starts_with(sb_contents.buf, "refs/") &&
335 !check_refname_format(sb_contents.buf, 0)) {
336 strbuf_swap(&sb_contents, referent);
337 *type |= REF_ISSYMREF;
338 ret = 0;
339 goto out;
340 }
341 /*
342 * It doesn't look like a refname; fall through to just
343 * treating it like a non-symlink, and reading whatever it
344 * points to.
345 */
346 }
347
348 /* Is it a directory? */
349 if (S_ISDIR(st.st_mode)) {
350 /*
351 * Even though there is a directory where the loose
352 * ref is supposed to be, there could still be a
353 * packed ref:
354 */
355 if (refs_read_raw_ref(refs->packed_ref_store, refname,
356 sha1, referent, type)) {
357 errno = EISDIR;
358 goto out;
359 }
360 ret = 0;
361 goto out;
362 }
363
364 /*
365 * Anything else, just open it and try to use it as
366 * a ref
367 */
368 fd = open(path, O_RDONLY);
369 if (fd < 0) {
370 if (errno == ENOENT && !S_ISLNK(st.st_mode))
371 /* inconsistent with lstat; retry */
372 goto stat_ref;
373 else
374 goto out;
375 }
376 strbuf_reset(&sb_contents);
377 if (strbuf_read(&sb_contents, fd, 256) < 0) {
378 int save_errno = errno;
379 close(fd);
380 errno = save_errno;
381 goto out;
382 }
383 close(fd);
384 strbuf_rtrim(&sb_contents);
385 buf = sb_contents.buf;
386 if (starts_with(buf, "ref:")) {
387 buf += 4;
388 while (isspace(*buf))
389 buf++;
390
391 strbuf_reset(referent);
392 strbuf_addstr(referent, buf);
393 *type |= REF_ISSYMREF;
394 ret = 0;
395 goto out;
396 }
397
398 /*
399 * Please note that FETCH_HEAD has additional
400 * data after the sha.
401 */
402 if (get_sha1_hex(buf, sha1) ||
403 (buf[40] != '\0' && !isspace(buf[40]))) {
404 *type |= REF_ISBROKEN;
405 errno = EINVAL;
406 goto out;
407 }
408
409 ret = 0;
410
411out:
412 save_errno = errno;
413 strbuf_release(&sb_path);
414 strbuf_release(&sb_contents);
415 errno = save_errno;
416 return ret;
417}
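/*
 * For reference, the two loose-ref formats parsed above look like this
 * on disk (the hash is only a placeholder):
 *
 *	$ cat .git/refs/heads/master
 *	3f786850e387550fdab836ed7e6dc881de23001b
 *
 *	$ cat .git/HEAD
 *	ref: refs/heads/master
 */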
418
419static void unlock_ref(struct ref_lock *lock)
420{
421 /* Do not free lock->lk -- atexit() still looks at it */
422 if (lock->lk)
423 rollback_lock_file(lock->lk);
424 free(lock->ref_name);
425 free(lock);
426}
427
428/*
429 * Lock refname, without following symrefs, and set *lock_p to point
430 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
431 * and type similarly to read_raw_ref().
432 *
433 * The caller must verify that refname is a "safe" reference name (in
434 * the sense of refname_is_safe()) before calling this function.
435 *
436 * If the reference doesn't already exist, verify that refname doesn't
437 * have a D/F conflict with any existing references. extras and skip
438 * are passed to refs_verify_refname_available() for this check.
439 *
440 * If mustexist is not set and the reference is not found or is
441 * broken, lock the reference anyway but clear sha1.
442 *
443 * Return 0 on success. On failure, write an error message to err and
444 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
445 *
446 * Implementation note: This function is basically
447 *
448 * lock reference
449 * read_raw_ref()
450 *
451 * but it includes a lot more code to
452 * - Deal with possible races with other processes
453 * - Avoid calling refs_verify_refname_available() when it can be
454 * avoided, namely if we were successfully able to read the ref
455 * - Generate informative error messages in the case of failure
456 */
457static int lock_raw_ref(struct files_ref_store *refs,
458 const char *refname, int mustexist,
459 const struct string_list *extras,
460 const struct string_list *skip,
461 struct ref_lock **lock_p,
462 struct strbuf *referent,
463 unsigned int *type,
464 struct strbuf *err)
465{
466 struct ref_lock *lock;
467 struct strbuf ref_file = STRBUF_INIT;
468 int attempts_remaining = 3;
469 int ret = TRANSACTION_GENERIC_ERROR;
470
471 assert(err);
472 files_assert_main_repository(refs, "lock_raw_ref");
473
474 *type = 0;
475
476 /* First lock the file so it can't change out from under us. */
477
478 *lock_p = lock = xcalloc(1, sizeof(*lock));
479
480 lock->ref_name = xstrdup(refname);
481 files_ref_path(refs, &ref_file, refname);
482
483retry:
484 switch (safe_create_leading_directories(ref_file.buf)) {
485 case SCLD_OK:
486 break; /* success */
487 case SCLD_EXISTS:
488 /*
489 * Suppose refname is "refs/foo/bar". We just failed
490 * to create the containing directory, "refs/foo",
491 * because there was a non-directory in the way. This
492 * indicates a D/F conflict, probably because of
493 * another reference such as "refs/foo". There is no
494 * reason to expect this error to be transitory.
495 */
496 if (refs_verify_refname_available(&refs->base, refname,
497 extras, skip, err)) {
498 if (mustexist) {
499 /*
500 * To the user the relevant error is
501 * that the "mustexist" reference is
502 * missing:
503 */
504 strbuf_reset(err);
505 strbuf_addf(err, "unable to resolve reference '%s'",
506 refname);
507 } else {
508 /*
509 * The error message set by
510 * refs_verify_refname_available() is
511 * OK.
512 */
513 ret = TRANSACTION_NAME_CONFLICT;
514 }
515 } else {
516 /*
517 * The file that is in the way isn't a loose
518 * reference. Report it as a low-level
519 * failure.
520 */
521 strbuf_addf(err, "unable to create lock file %s.lock; "
522 "non-directory in the way",
523 ref_file.buf);
524 }
525 goto error_return;
526 case SCLD_VANISHED:
527 /* Maybe another process was tidying up. Try again. */
528 if (--attempts_remaining > 0)
529 goto retry;
530 /* fall through */
531 default:
532 strbuf_addf(err, "unable to create directory for %s",
533 ref_file.buf);
534 goto error_return;
535 }
536
537 if (!lock->lk)
538 lock->lk = xcalloc(1, sizeof(struct lock_file));
539
540 if (hold_lock_file_for_update_timeout(
541 lock->lk, ref_file.buf, LOCK_NO_DEREF,
542 get_files_ref_lock_timeout_ms()) < 0) {
543 if (errno == ENOENT && --attempts_remaining > 0) {
544 /*
545 * Maybe somebody just deleted one of the
546 * directories leading to ref_file. Try
547 * again:
548 */
549 goto retry;
550 } else {
551 unable_to_lock_message(ref_file.buf, errno, err);
552 goto error_return;
553 }
554 }
555
556 /*
557 * Now we hold the lock and can read the reference without
558 * fear that its value will change.
559 */
560
561 if (files_read_raw_ref(&refs->base, refname,
562 lock->old_oid.hash, referent, type)) {
563 if (errno == ENOENT) {
564 if (mustexist) {
565 /* Garden variety missing reference. */
566 strbuf_addf(err, "unable to resolve reference '%s'",
567 refname);
568 goto error_return;
569 } else {
570 /*
571 * Reference is missing, but that's OK. We
572 * know that there is not a conflict with
573 * another loose reference because
574 * (supposing that we are trying to lock
575 * reference "refs/foo/bar"):
576 *
577 * - We were successfully able to create
578 * the lockfile refs/foo/bar.lock, so we
579 * know there cannot be a loose reference
580 * named "refs/foo".
581 *
582 * - We got ENOENT and not EISDIR, so we
583 * know that there cannot be a loose
584 * reference named "refs/foo/bar/baz".
585 */
586 }
587 } else if (errno == EISDIR) {
588 /*
589 * There is a directory in the way. It might have
590 * contained references that have been deleted. If
591 * we don't require that the reference already
592 * exists, try to remove the directory so that it
593 * doesn't cause trouble when we want to rename the
594 * lockfile into place later.
595 */
596 if (mustexist) {
597 /* Garden variety missing reference. */
598 strbuf_addf(err, "unable to resolve reference '%s'",
599 refname);
600 goto error_return;
601 } else if (remove_dir_recursively(&ref_file,
602 REMOVE_DIR_EMPTY_ONLY)) {
603 if (refs_verify_refname_available(
604 &refs->base, refname,
605 extras, skip, err)) {
606 /*
607 * The error message set by
608 * verify_refname_available() is OK.
609 */
610 ret = TRANSACTION_NAME_CONFLICT;
611 goto error_return;
612 } else {
613 /*
614 * We can't delete the directory,
615 * but we also don't know of any
616 * references that it should
617 * contain.
618 */
619 strbuf_addf(err, "there is a non-empty directory '%s' "
620 "blocking reference '%s'",
621 ref_file.buf, refname);
622 goto error_return;
623 }
624 }
625 } else if (errno == EINVAL && (*type & REF_ISBROKEN)) {
626 strbuf_addf(err, "unable to resolve reference '%s': "
627 "reference broken", refname);
628 goto error_return;
629 } else {
630 strbuf_addf(err, "unable to resolve reference '%s': %s",
631 refname, strerror(errno));
632 goto error_return;
633 }
634
635 /*
636 * If the ref did not exist and we are creating it,
637 * make sure there is no existing packed ref that
638 * conflicts with refname:
639 */
640 if (refs_verify_refname_available(
641 refs->packed_ref_store, refname,
642 extras, skip, err))
643 goto error_return;
644 }
645
646 ret = 0;
647 goto out;
648
649error_return:
650 unlock_ref(lock);
651 *lock_p = NULL;
652
653out:
654 strbuf_release(&ref_file);
655 return ret;
656}
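/*
 * Example of the D/F ("directory/file") conflicts handled above: the
 * refs "refs/heads/topic" and "refs/heads/topic/wip" cannot both exist
 * as loose refs, because "refs/heads/topic" would have to be a regular
 * file and a directory at the same time.
 */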
657
658static int files_peel_ref(struct ref_store *ref_store,
659 const char *refname, unsigned char *sha1)
660{
661 struct files_ref_store *refs =
662 files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
663 "peel_ref");
664 int flag;
665 unsigned char base[20];
666
667 if (current_ref_iter && current_ref_iter->refname == refname) {
668 struct object_id peeled;
669
670 if (ref_iterator_peel(current_ref_iter, &peeled))
671 return -1;
672 hashcpy(sha1, peeled.hash);
673 return 0;
674 }
675
676 if (refs_read_ref_full(ref_store, refname,
677 RESOLVE_REF_READING, base, &flag))
678 return -1;
679
680 /*
681 * If the reference is packed, read its ref_entry from the
682 * cache in the hope that we already know its peeled value.
683 * We only try this optimization on packed references because
684 * (a) forcing the filling of the loose reference cache could
685 * be expensive and (b) loose references anyway usually do not
686 * have REF_KNOWS_PEELED.
687 */
688 if (flag & REF_ISPACKED &&
689 !refs_peel_ref(refs->packed_ref_store, refname, sha1))
690 return 0;
691
692 return peel_object(base, sha1);
693}
694
695struct files_ref_iterator {
696 struct ref_iterator base;
697
698 struct ref_iterator *iter0;
699 unsigned int flags;
700};
701
702static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
703{
704 struct files_ref_iterator *iter =
705 (struct files_ref_iterator *)ref_iterator;
706 int ok;
707
708 while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
709 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
710 ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
711 continue;
712
713 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
714 !ref_resolves_to_object(iter->iter0->refname,
715 iter->iter0->oid,
716 iter->iter0->flags))
717 continue;
718
719 iter->base.refname = iter->iter0->refname;
720 iter->base.oid = iter->iter0->oid;
721 iter->base.flags = iter->iter0->flags;
722 return ITER_OK;
723 }
724
725 iter->iter0 = NULL;
726 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
727 ok = ITER_ERROR;
728
729 return ok;
730}
731
732static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
733 struct object_id *peeled)
734{
735 struct files_ref_iterator *iter =
736 (struct files_ref_iterator *)ref_iterator;
737
738 return ref_iterator_peel(iter->iter0, peeled);
739}
740
741static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
742{
743 struct files_ref_iterator *iter =
744 (struct files_ref_iterator *)ref_iterator;
745 int ok = ITER_DONE;
746
747 if (iter->iter0)
748 ok = ref_iterator_abort(iter->iter0);
749
750 base_ref_iterator_free(ref_iterator);
751 return ok;
752}
753
754static struct ref_iterator_vtable files_ref_iterator_vtable = {
755 files_ref_iterator_advance,
756 files_ref_iterator_peel,
757 files_ref_iterator_abort
758};
759
760static struct ref_iterator *files_ref_iterator_begin(
761 struct ref_store *ref_store,
762 const char *prefix, unsigned int flags)
763{
764 struct files_ref_store *refs;
765 struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
766 struct files_ref_iterator *iter;
767 struct ref_iterator *ref_iterator;
768 unsigned int required_flags = REF_STORE_READ;
769
770 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
771 required_flags |= REF_STORE_ODB;
772
773 refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
774
775 /*
776 * We must make sure that all loose refs are read before
777 * accessing the packed-refs file; this avoids a race
778 * condition if loose refs are migrated to the packed-refs
779 * file by a simultaneous process, but our in-memory view is
780 * from before the migration. We ensure this as follows:
781 * First, we start the loose refs iteration with its
782 * `prime_ref` argument set to true. This causes the loose
783 * references in the subtree to be pre-read into the cache.
784 * (If they've already been read, that's OK; we only need to
785 * guarantee that they're read before the packed refs, not
786 * *how much* before.) After that, we call
787 * packed_ref_iterator_begin(), which internally checks
788 * whether the packed-ref cache is up to date with what is on
789 * disk, and re-reads it if not.
790 */
791
792 loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
793 prefix, 1);
794
795 /*
796 * The packed-refs file might contain broken references, for
797 * example an old version of a reference that points at an
798 * object that has since been garbage-collected. This is OK as
799 * long as there is a corresponding loose reference that
800 * overrides it, and we don't want to emit an error message in
801 * this case. So ask the packed_ref_store for all of its
802 * references, and (if needed) do our own check for broken
803 * ones in files_ref_iterator_advance(), after we have merged
804 * the packed and loose references.
805 */
806 packed_iter = refs_ref_iterator_begin(
807 refs->packed_ref_store, prefix, 0,
808 DO_FOR_EACH_INCLUDE_BROKEN);
809
810 overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
811
812 iter = xcalloc(1, sizeof(*iter));
813 ref_iterator = &iter->base;
814 base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
815 overlay_iter->ordered);
816 iter->iter0 = overlay_iter;
817 iter->flags = flags;
818
819 return ref_iterator;
820}
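/*
 * A caller typically drives the resulting iterator like this (sketch,
 * error handling trimmed):
 *
 *	struct ref_iterator *iter =
 *		files_ref_iterator_begin(ref_store, "refs/heads/", 0);
 *	int ok;
 *
 *	while ((ok = ref_iterator_advance(iter)) == ITER_OK)
 *		printf("%s %s\n", oid_to_hex(iter->oid), iter->refname);
 *	if (ok != ITER_DONE)
 *		die("iteration over refs/heads/ failed");
 */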
821
822/*
823 * Verify that the reference locked by lock has the value old_sha1.
824 * Fail if the reference doesn't exist and mustexist is set. Return 0
825 * on success. On error, write an error message to err, set errno, and
826 * return a negative value.
827 */
828static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
829 const unsigned char *old_sha1, int mustexist,
830 struct strbuf *err)
831{
832 assert(err);
833
834 if (refs_read_ref_full(ref_store, lock->ref_name,
835 mustexist ? RESOLVE_REF_READING : 0,
836 lock->old_oid.hash, NULL)) {
837 if (old_sha1) {
838 int save_errno = errno;
839 strbuf_addf(err, "can't verify ref '%s'", lock->ref_name);
840 errno = save_errno;
841 return -1;
842 } else {
843 oidclr(&lock->old_oid);
844 return 0;
845 }
846 }
847 if (old_sha1 && hashcmp(lock->old_oid.hash, old_sha1)) {
848 strbuf_addf(err, "ref '%s' is at %s but expected %s",
849 lock->ref_name,
850 oid_to_hex(&lock->old_oid),
851 sha1_to_hex(old_sha1));
852 errno = EBUSY;
853 return -1;
854 }
855 return 0;
856}
857
858static int remove_empty_directories(struct strbuf *path)
859{
860 /*
861 * we want to create a file but there is a directory there;
862 * if that is an empty directory (or a directory that contains
863 * only empty directories), remove them.
864 */
865 return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);
866}
867
868static int create_reflock(const char *path, void *cb)
869{
870 struct lock_file *lk = cb;
871
872 return hold_lock_file_for_update_timeout(
873 lk, path, LOCK_NO_DEREF,
874 get_files_ref_lock_timeout_ms()) < 0 ? -1 : 0;
875}
876
877/*
878 * Locks a ref, returning the lock on success and NULL on failure.
879 * On failure errno is set to something meaningful.
880 */
881static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs,
882 const char *refname,
883 const unsigned char *old_sha1,
884 const struct string_list *extras,
885 const struct string_list *skip,
886 unsigned int flags, int *type,
887 struct strbuf *err)
888{
889 struct strbuf ref_file = STRBUF_INIT;
890 struct ref_lock *lock;
891 int last_errno = 0;
892 int mustexist = (old_sha1 && !is_null_sha1(old_sha1));
893 int resolve_flags = RESOLVE_REF_NO_RECURSE;
894 int resolved;
895
896 files_assert_main_repository(refs, "lock_ref_sha1_basic");
897 assert(err);
898
899 lock = xcalloc(1, sizeof(struct ref_lock));
900
901 if (mustexist)
902 resolve_flags |= RESOLVE_REF_READING;
903 if (flags & REF_DELETING)
904 resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME;
905
906 files_ref_path(refs, &ref_file, refname);
907 resolved = !!refs_resolve_ref_unsafe(&refs->base,
908 refname, resolve_flags,
909 lock->old_oid.hash, type);
910 if (!resolved && errno == EISDIR) {
911 /*
912 * we are trying to lock foo but we used to
913 * have foo/bar which now does not exist;
914 * it is normal for the empty directory 'foo'
915 * to remain.
916 */
917 if (remove_empty_directories(&ref_file)) {
918 last_errno = errno;
919 if (!refs_verify_refname_available(
920 &refs->base,
921 refname, extras, skip, err))
922 strbuf_addf(err, "there are still refs under '%s'",
923 refname);
924 goto error_return;
925 }
926 resolved = !!refs_resolve_ref_unsafe(&refs->base,
927 refname, resolve_flags,
928 lock->old_oid.hash, type);
929 }
930 if (!resolved) {
931 last_errno = errno;
932 if (last_errno != ENOTDIR ||
933 !refs_verify_refname_available(&refs->base, refname,
934 extras, skip, err))
935 strbuf_addf(err, "unable to resolve reference '%s': %s",
936 refname, strerror(last_errno));
937
938 goto error_return;
939 }
940
941 /*
942 * If the ref did not exist and we are creating it, make sure
943 * there is no existing packed ref whose name begins with our
944 * refname, nor a packed ref whose name is a proper prefix of
945 * our refname.
946 */
947 if (is_null_oid(&lock->old_oid) &&
948 refs_verify_refname_available(refs->packed_ref_store, refname,
949 extras, skip, err)) {
950 last_errno = ENOTDIR;
951 goto error_return;
952 }
953
954 lock->lk = xcalloc(1, sizeof(struct lock_file));
955
956 lock->ref_name = xstrdup(refname);
957
958 if (raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) {
959 last_errno = errno;
960 unable_to_lock_message(ref_file.buf, errno, err);
961 goto error_return;
962 }
963
964 if (verify_lock(&refs->base, lock, old_sha1, mustexist, err)) {
965 last_errno = errno;
966 goto error_return;
967 }
968 goto out;
969
970 error_return:
971 unlock_ref(lock);
972 lock = NULL;
973
974 out:
975 strbuf_release(&ref_file);
976 errno = last_errno;
977 return lock;
978}
979
980struct ref_to_prune {
981 struct ref_to_prune *next;
982 unsigned char sha1[20];
983 char name[FLEX_ARRAY];
984};
985
986enum {
987 REMOVE_EMPTY_PARENTS_REF = 0x01,
988 REMOVE_EMPTY_PARENTS_REFLOG = 0x02
989};
990
991/*
992 * Remove empty parent directories associated with the specified
993 * reference and/or its reflog, but spare [logs/]refs/ and immediate
994 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
995 * REMOVE_EMPTY_PARENTS_REFLOG.
996 */
997static void try_remove_empty_parents(struct files_ref_store *refs,
998 const char *refname,
999 unsigned int flags)
1000{
1001 struct strbuf buf = STRBUF_INIT;
1002 struct strbuf sb = STRBUF_INIT;
1003 char *p, *q;
1004 int i;
1005
1006 strbuf_addstr(&buf, refname);
1007 p = buf.buf;
1008 for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
1009 while (*p && *p != '/')
1010 p++;
1011 /* tolerate duplicate slashes; see check_refname_format() */
1012 while (*p == '/')
1013 p++;
1014 }
1015 q = buf.buf + buf.len;
1016 while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
1017 while (q > p && *q != '/')
1018 q--;
1019 while (q > p && *(q-1) == '/')
1020 q--;
1021 if (q == p)
1022 break;
1023 strbuf_setlen(&buf, q - buf.buf);
1024
1025 strbuf_reset(&sb);
1026 files_ref_path(refs, &sb, buf.buf);
1027 if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
1028 flags &= ~REMOVE_EMPTY_PARENTS_REF;
1029
1030 strbuf_reset(&sb);
1031 files_reflog_path(refs, &sb, buf.buf);
1032 if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
1033 flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
1034 }
1035 strbuf_release(&buf);
1036 strbuf_release(&sb);
1037}
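/*
 * For example, after "refs/heads/foo/bar" has been deleted, this tries
 * to rmdir() the "refs/heads/foo" directory (and would keep walking up
 * for deeper names), but never touches "refs/heads" or "refs/" itself.
 */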
1038
1039/* make sure nobody touched the ref, and unlink */
1040static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
1041{
1042 struct ref_transaction *transaction;
1043 struct strbuf err = STRBUF_INIT;
1044
1045 if (check_refname_format(r->name, 0))
1046 return;
1047
1048 transaction = ref_store_transaction_begin(&refs->base, &err);
1049 if (!transaction ||
1050 ref_transaction_delete(transaction, r->name, r->sha1,
1051 REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
1052 ref_transaction_commit(transaction, &err)) {
1053 ref_transaction_free(transaction);
1054 error("%s", err.buf);
1055 strbuf_release(&err);
1056 return;
1057 }
1058 ref_transaction_free(transaction);
1059 strbuf_release(&err);
1060}
1061
1062/*
1063 * Prune the loose versions of the references in the linked list
1064 * `*refs_to_prune`, freeing the entries in the list as we go.
1065 */
1066static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
1067{
1068 while (*refs_to_prune) {
1069 struct ref_to_prune *r = *refs_to_prune;
1070 *refs_to_prune = r->next;
1071 prune_ref(refs, r);
1072 free(r);
1073 }
1074}
1075
1076/*
1077 * Return true if the specified reference should be packed.
1078 */
1079static int should_pack_ref(const char *refname,
1080 const struct object_id *oid, unsigned int ref_flags,
1081 unsigned int pack_flags)
1082{
1083 /* Do not pack per-worktree refs: */
1084 if (ref_type(refname) != REF_TYPE_NORMAL)
1085 return 0;
1086
1087 /* Do not pack non-tags unless PACK_REFS_ALL is set: */
1088 if (!(pack_flags & PACK_REFS_ALL) && !starts_with(refname, "refs/tags/"))
1089 return 0;
1090
1091 /* Do not pack symbolic refs: */
1092 if (ref_flags & REF_ISSYMREF)
1093 return 0;
1094
1095 /* Do not pack broken refs: */
1096 if (!ref_resolves_to_object(refname, oid, ref_flags))
1097 return 0;
1098
1099 return 1;
1100}
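/*
 * In command-line terms, PACK_REFS_ALL corresponds to
 * "git pack-refs --all" (without it only "refs/tags/" is considered),
 * and the PACK_REFS_PRUNE flag used below corresponds to "--prune".
 */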
1101
1102static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
1103{
1104 struct files_ref_store *refs =
1105 files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
1106 "pack_refs");
1107 struct ref_iterator *iter;
1108 int ok;
1109 struct ref_to_prune *refs_to_prune = NULL;
1110 struct strbuf err = STRBUF_INIT;
1111 struct ref_transaction *transaction;
1112
1113 transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
1114 if (!transaction)
1115 return -1;
1116
1117 packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
1118
1119 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL, 0);
1120 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
1121 /*
1122 * If the loose reference can be packed, add it to the
1123 * packed-refs transaction. If the reference should be
1124 * pruned, also add it to refs_to_prune.
1125 */
1126 if (!should_pack_ref(iter->refname, iter->oid, iter->flags,
1127 flags))
1128 continue;
1129
1130 /*
1131 * Add a reference creation for this reference to the
1132 * packed-refs transaction:
1133 */
1134 if (ref_transaction_update(transaction, iter->refname,
1135 iter->oid->hash, NULL,
1136 REF_NODEREF, NULL, &err))
1137 die("failure preparing to create packed reference %s: %s",
1138 iter->refname, err.buf);
1139
1140 /* Schedule the loose reference for pruning if requested. */
1141 if ((flags & PACK_REFS_PRUNE)) {
1142 struct ref_to_prune *n;
1143 FLEX_ALLOC_STR(n, name, iter->refname);
1144 hashcpy(n->sha1, iter->oid->hash);
1145 n->next = refs_to_prune;
1146 refs_to_prune = n;
1147 }
1148 }
1149 if (ok != ITER_DONE)
1150 die("error while iterating over references");
1151
1152 if (ref_transaction_commit(transaction, &err))
1153 die("unable to write new packed-refs: %s", err.buf);
1154
1155 ref_transaction_free(transaction);
1156
1157 packed_refs_unlock(refs->packed_ref_store);
1158
1159 prune_refs(refs, &refs_to_prune);
1160 strbuf_release(&err);
1161 return 0;
1162}
1163
1164static int files_delete_refs(struct ref_store *ref_store, const char *msg,
1165 struct string_list *refnames, unsigned int flags)
1166{
1167 struct files_ref_store *refs =
1168 files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1169 struct strbuf err = STRBUF_INIT;
1170 int i, result = 0;
1171
1172 if (!refnames->nr)
1173 return 0;
1174
1175 if (packed_refs_lock(refs->packed_ref_store, 0, &err))
1176 goto error;
1177
1178 if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) {
1179 packed_refs_unlock(refs->packed_ref_store);
1180 goto error;
1181 }
1182
1183 packed_refs_unlock(refs->packed_ref_store);
1184
1185 for (i = 0; i < refnames->nr; i++) {
1186 const char *refname = refnames->items[i].string;
1187
1188 if (refs_delete_ref(&refs->base, msg, refname, NULL, flags))
1189 result |= error(_("could not remove reference %s"), refname);
1190 }
1191
1192 strbuf_release(&err);
1193 return result;
1194
1195error:
1196 /*
1197 * If we failed to rewrite the packed-refs file, then it is
1198 * unsafe to try to remove loose refs, because doing so might
1199 * expose an obsolete packed value for a reference that might
1200 * even point at an object that has been garbage collected.
1201 */
1202 if (refnames->nr == 1)
1203 error(_("could not delete reference %s: %s"),
1204 refnames->items[0].string, err.buf);
1205 else
1206 error(_("could not delete references: %s"), err.buf);
1207
1208 strbuf_release(&err);
1209 return -1;
1210}
1211
1212/*
1213 * People using contrib's git-new-workdir have .git/logs/refs ->
1214 * /some/other/path/.git/logs/refs, and that may live on another device.
1215 *
1216 * IOW, to avoid cross-device rename errors, the temporary renamed log must
1217 * itself live under logs/refs.
1218 */
1219#define TMP_RENAMED_LOG "refs/.tmp-renamed-log"
1220
1221struct rename_cb {
1222 const char *tmp_renamed_log;
1223 int true_errno;
1224};
1225
1226static int rename_tmp_log_callback(const char *path, void *cb_data)
1227{
1228 struct rename_cb *cb = cb_data;
1229
1230 if (rename(cb->tmp_renamed_log, path)) {
1231 /*
1232 * rename(a, b) when b is an existing directory ought
1233 * to result in EISDIR, but Solaris 5.8 gives ENOTDIR.
1234 * Sheesh. Record the true errno for error reporting,
1235 * but report EISDIR to raceproof_create_file() so
1236 * that it knows to retry.
1237 */
1238 cb->true_errno = errno;
1239 if (errno == ENOTDIR)
1240 errno = EISDIR;
1241 return -1;
1242 } else {
1243 return 0;
1244 }
1245}
1246
1247static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
1248{
1249 struct strbuf path = STRBUF_INIT;
1250 struct strbuf tmp = STRBUF_INIT;
1251 struct rename_cb cb;
1252 int ret;
1253
1254 files_reflog_path(refs, &path, newrefname);
1255 files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
1256 cb.tmp_renamed_log = tmp.buf;
1257 ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
1258 if (ret) {
1259 if (errno == EISDIR)
1260 error("directory not empty: %s", path.buf);
1261 else
1262 error("unable to move logfile %s to %s: %s",
1263 tmp.buf, path.buf,
1264 strerror(cb.true_errno));
1265 }
1266
1267 strbuf_release(&path);
1268 strbuf_release(&tmp);
1269 return ret;
1270}
1271
1272static int write_ref_to_lockfile(struct ref_lock *lock,
1273 const struct object_id *oid, struct strbuf *err);
1274static int commit_ref_update(struct files_ref_store *refs,
1275 struct ref_lock *lock,
1276 const struct object_id *oid, const char *logmsg,
1277 struct strbuf *err);
1278
1279static int files_rename_ref(struct ref_store *ref_store,
1280 const char *oldrefname, const char *newrefname,
1281 const char *logmsg)
1282{
1283 struct files_ref_store *refs =
1284 files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1285 struct object_id oid, orig_oid;
1286 int flag = 0, logmoved = 0;
1287 struct ref_lock *lock;
1288 struct stat loginfo;
1289 struct strbuf sb_oldref = STRBUF_INIT;
1290 struct strbuf sb_newref = STRBUF_INIT;
1291 struct strbuf tmp_renamed_log = STRBUF_INIT;
1292 int log, ret;
1293 struct strbuf err = STRBUF_INIT;
1294
1295 files_reflog_path(refs, &sb_oldref, oldrefname);
1296 files_reflog_path(refs, &sb_newref, newrefname);
1297 files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);
1298
1299 log = !lstat(sb_oldref.buf, &loginfo);
1300 if (log && S_ISLNK(loginfo.st_mode)) {
1301 ret = error("reflog for %s is a symlink", oldrefname);
1302 goto out;
1303 }
1304
1305 if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
1306 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1307 orig_oid.hash, &flag)) {
1308 ret = error("refname %s not found", oldrefname);
1309 goto out;
1310 }
1311
1312 if (flag & REF_ISSYMREF) {
1313 ret = error("refname %s is a symbolic ref, renaming it is not supported",
1314 oldrefname);
1315 goto out;
1316 }
1317 if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
1318 ret = 1;
1319 goto out;
1320 }
1321
1322 if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
1323 ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1324 oldrefname, strerror(errno));
1325 goto out;
1326 }
1327
1328 if (refs_delete_ref(&refs->base, logmsg, oldrefname,
1329 orig_oid.hash, REF_NODEREF)) {
1330 error("unable to delete old %s", oldrefname);
1331 goto rollback;
1332 }
1333
1334 /*
1335 * Since we are doing a shallow lookup, oid is not the
1336 * correct value to pass to delete_ref as old_oid. But that
1337 * doesn't matter, because an old_oid check wouldn't add to
1338 * the safety anyway; we want to delete the reference whatever
1339 * its current value.
1340 */
1341 if (!refs_read_ref_full(&refs->base, newrefname,
1342 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1343 oid.hash, NULL) &&
1344 refs_delete_ref(&refs->base, NULL, newrefname,
1345 NULL, REF_NODEREF)) {
1346 if (errno == EISDIR) {
1347 struct strbuf path = STRBUF_INIT;
1348 int result;
1349
1350 files_ref_path(refs, &path, newrefname);
1351 result = remove_empty_directories(&path);
1352 strbuf_release(&path);
1353
1354 if (result) {
1355 error("Directory not empty: %s", newrefname);
1356 goto rollback;
1357 }
1358 } else {
1359 error("unable to delete existing %s", newrefname);
1360 goto rollback;
1361 }
1362 }
1363
1364 if (log && rename_tmp_log(refs, newrefname))
1365 goto rollback;
1366
1367 logmoved = log;
1368
1369 lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
1370 REF_NODEREF, NULL, &err);
1371 if (!lock) {
1372 error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1373 strbuf_release(&err);
1374 goto rollback;
1375 }
1376 oidcpy(&lock->old_oid, &orig_oid);
1377
1378 if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
1379 commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
1380 error("unable to write current sha1 into %s: %s", newrefname, err.buf);
1381 strbuf_release(&err);
1382 goto rollback;
1383 }
1384
1385 ret = 0;
1386 goto out;
1387
1388 rollback:
1389 lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL,
1390 REF_NODEREF, NULL, &err);
1391 if (!lock) {
1392 error("unable to lock %s for rollback: %s", oldrefname, err.buf);
1393 strbuf_release(&err);
1394 goto rollbacklog;
1395 }
1396
1397 flag = log_all_ref_updates;
1398 log_all_ref_updates = LOG_REFS_NONE;
1399 if (write_ref_to_lockfile(lock, &orig_oid, &err) ||
1400 commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
1401 error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
1402 strbuf_release(&err);
1403 }
1404 log_all_ref_updates = flag;
1405
1406 rollbacklog:
1407 if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
1408 error("unable to restore logfile %s from %s: %s",
1409 oldrefname, newrefname, strerror(errno));
1410 if (!logmoved && log &&
1411 rename(tmp_renamed_log.buf, sb_oldref.buf))
1412 error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
1413 oldrefname, strerror(errno));
1414 ret = 1;
1415 out:
1416 strbuf_release(&sb_newref);
1417 strbuf_release(&sb_oldref);
1418 strbuf_release(&tmp_renamed_log);
1419
1420 return ret;
1421}
1422
1423static int close_ref(struct ref_lock *lock)
1424{
1425 if (close_lock_file(lock->lk))
1426 return -1;
1427 return 0;
1428}
1429
1430static int commit_ref(struct ref_lock *lock)
1431{
1432 char *path = get_locked_file_path(lock->lk);
1433 struct stat st;
1434
1435 if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
1436 /*
1437 * There is a directory at the path we want to rename
1438 * the lockfile to. Hopefully it is empty; try to
1439 * delete it.
1440 */
1441 size_t len = strlen(path);
1442 struct strbuf sb_path = STRBUF_INIT;
1443
1444 strbuf_attach(&sb_path, path, len, len);
1445
1446 /*
1447 * If this fails, commit_lock_file() will also fail
1448 * and will report the problem.
1449 */
1450 remove_empty_directories(&sb_path);
1451 strbuf_release(&sb_path);
1452 } else {
1453 free(path);
1454 }
1455
1456 if (commit_lock_file(lock->lk))
1457 return -1;
1458 return 0;
1459}
1460
1461static int open_or_create_logfile(const char *path, void *cb)
1462{
1463 int *fd = cb;
1464
1465 *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
1466 return (*fd < 0) ? -1 : 0;
1467}
1468
1469/*
1470 * Create a reflog for a ref. If force_create = 0, only create the
1471 * reflog for certain refs (those for which should_autocreate_reflog
1472 * returns non-zero). Otherwise, create it regardless of the reference
1473 * name. If the logfile already existed or was created, return 0 and
1474 * set *logfd to the file descriptor opened for appending to the file.
1475 * If no logfile exists and we decided not to create one, return 0 and
1476 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
1477 * return -1.
1478 */
1479static int log_ref_setup(struct files_ref_store *refs,
1480 const char *refname, int force_create,
1481 int *logfd, struct strbuf *err)
1482{
1483 struct strbuf logfile_sb = STRBUF_INIT;
1484 char *logfile;
1485
1486 files_reflog_path(refs, &logfile_sb, refname);
1487 logfile = strbuf_detach(&logfile_sb, NULL);
1488
1489 if (force_create || should_autocreate_reflog(refname)) {
1490 if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
1491 if (errno == ENOENT)
1492 strbuf_addf(err, "unable to create directory for '%s': "
1493 "%s", logfile, strerror(errno));
1494 else if (errno == EISDIR)
1495 strbuf_addf(err, "there are still logs under '%s'",
1496 logfile);
1497 else
1498 strbuf_addf(err, "unable to append to '%s': %s",
1499 logfile, strerror(errno));
1500
1501 goto error;
1502 }
1503 } else {
1504 *logfd = open(logfile, O_APPEND | O_WRONLY, 0666);
1505 if (*logfd < 0) {
1506 if (errno == ENOENT || errno == EISDIR) {
1507 /*
1508 * The logfile doesn't already exist,
1509 * but that is not an error; it only
1510 * means that we won't write log
1511 * entries to it.
1512 */
1513 ;
1514 } else {
1515 strbuf_addf(err, "unable to append to '%s': %s",
1516 logfile, strerror(errno));
1517 goto error;
1518 }
1519 }
1520 }
1521
1522 if (*logfd >= 0)
1523 adjust_shared_perm(logfile);
1524
1525 free(logfile);
1526 return 0;
1527
1528error:
1529 free(logfile);
1530 return -1;
1531}
1532
1533static int files_create_reflog(struct ref_store *ref_store,
1534 const char *refname, int force_create,
1535 struct strbuf *err)
1536{
1537 struct files_ref_store *refs =
1538 files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1539 int fd;
1540
1541 if (log_ref_setup(refs, refname, force_create, &fd, err))
1542 return -1;
1543
1544 if (fd >= 0)
1545 close(fd);
1546
1547 return 0;
1548}
1549
1550static int log_ref_write_fd(int fd, const struct object_id *old_oid,
1551 const struct object_id *new_oid,
1552 const char *committer, const char *msg)
1553{
1554 int msglen, written;
1555 unsigned maxlen, len;
1556 char *logrec;
1557
1558 msglen = msg ? strlen(msg) : 0;
1559 maxlen = strlen(committer) + msglen + 100;
1560 logrec = xmalloc(maxlen);
1561 len = xsnprintf(logrec, maxlen, "%s %s %s\n",
1562 oid_to_hex(old_oid),
1563 oid_to_hex(new_oid),
1564 committer);
1565 if (msglen)
1566 len += copy_reflog_msg(logrec + len - 1, msg) - 1;
1567
1568 written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
1569 free(logrec);
1570 if (written != len)
1571 return -1;
1572
1573 return 0;
1574}
1575
1576static int files_log_ref_write(struct files_ref_store *refs,
1577 const char *refname, const struct object_id *old_oid,
1578 const struct object_id *new_oid, const char *msg,
1579 int flags, struct strbuf *err)
1580{
1581 int logfd, result;
1582
1583 if (log_all_ref_updates == LOG_REFS_UNSET)
1584 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
1585
1586 result = log_ref_setup(refs, refname,
1587 flags & REF_FORCE_CREATE_REFLOG,
1588 &logfd, err);
1589
1590 if (result)
1591 return result;
1592
1593 if (logfd < 0)
1594 return 0;
1595 result = log_ref_write_fd(logfd, old_oid, new_oid,
1596 git_committer_info(0), msg);
1597 if (result) {
1598 struct strbuf sb = STRBUF_INIT;
1599 int save_errno = errno;
1600
1601 files_reflog_path(refs, &sb, refname);
1602 strbuf_addf(err, "unable to append to '%s': %s",
1603 sb.buf, strerror(save_errno));
1604 strbuf_release(&sb);
1605 close(logfd);
1606 return -1;
1607 }
1608 if (close(logfd)) {
1609 struct strbuf sb = STRBUF_INIT;
1610 int save_errno = errno;
1611
1612 files_reflog_path(refs, &sb, refname);
1613 strbuf_addf(err, "unable to append to '%s': %s",
1614 sb.buf, strerror(save_errno));
1615 strbuf_release(&sb);
1616 return -1;
1617 }
1618 return 0;
1619}
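/*
 * log_all_ref_updates mirrors core.logAllRefUpdates: LOG_REFS_NONE for
 * "false", LOG_REFS_NORMAL for "true" (the default in non-bare repos,
 * as applied above) and LOG_REFS_ALWAYS for "always".
 */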
1620
1621/*
1622 * Write oid into the open lockfile, then close the lockfile. On
1623 * errors, roll back the lockfile, fill in *err and
1624 * return -1.
1625 */
1626static int write_ref_to_lockfile(struct ref_lock *lock,
1627 const struct object_id *oid, struct strbuf *err)
1628{
1629 static char term = '\n';
1630 struct object *o;
1631 int fd;
1632
1633 o = parse_object(oid);
1634 if (!o) {
1635 strbuf_addf(err,
1636 "trying to write ref '%s' with nonexistent object %s",
1637 lock->ref_name, oid_to_hex(oid));
1638 unlock_ref(lock);
1639 return -1;
1640 }
1641 if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
1642 strbuf_addf(err,
1643 "trying to write non-commit object %s to branch '%s'",
1644 oid_to_hex(oid), lock->ref_name);
1645 unlock_ref(lock);
1646 return -1;
1647 }
1648 fd = get_lock_file_fd(lock->lk);
1649 if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
1650 write_in_full(fd, &term, 1) != 1 ||
1651 close_ref(lock) < 0) {
1652 strbuf_addf(err,
1653 "couldn't write '%s'", get_lock_file_path(lock->lk));
1654 unlock_ref(lock);
1655 return -1;
1656 }
1657 return 0;
1658}
1659
1660/*
1661 * Commit a change to a loose reference that has already been written
1662 * to the loose reference lockfile. Also update the reflogs if
1663 * necessary, using the specified lockmsg (which can be NULL).
1664 */
1665static int commit_ref_update(struct files_ref_store *refs,
1666 struct ref_lock *lock,
1667 const struct object_id *oid, const char *logmsg,
1668 struct strbuf *err)
1669{
1670 files_assert_main_repository(refs, "commit_ref_update");
1671
1672 clear_loose_ref_cache(refs);
1673 if (files_log_ref_write(refs, lock->ref_name,
1674 &lock->old_oid, oid,
1675 logmsg, 0, err)) {
1676 char *old_msg = strbuf_detach(err, NULL);
1677 strbuf_addf(err, "cannot update the ref '%s': %s",
1678 lock->ref_name, old_msg);
1679 free(old_msg);
1680 unlock_ref(lock);
1681 return -1;
1682 }
1683
1684 if (strcmp(lock->ref_name, "HEAD") != 0) {
1685 /*
1686 * Special hack: If a branch is updated directly and HEAD
1687 * points to it (may happen on the remote side of a push
1688 * for example) then logically the HEAD reflog should be
1689 * updated too.
1690 * A generic solution implies reverse symref information,
1691 * but finding all symrefs pointing to the given branch
1692 * would be rather costly for this rare event (the direct
1693 * update of a branch) to be worth it. So let's cheat and
1694 * check with HEAD only which should cover 99% of all usage
1695 * scenarios (even 100% of the default ones).
1696 */
1697 struct object_id head_oid;
1698 int head_flag;
1699 const char *head_ref;
1700
1701 head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
1702 RESOLVE_REF_READING,
1703 head_oid.hash, &head_flag);
1704 if (head_ref && (head_flag & REF_ISSYMREF) &&
1705 !strcmp(head_ref, lock->ref_name)) {
1706 struct strbuf log_err = STRBUF_INIT;
1707 if (files_log_ref_write(refs, "HEAD",
1708 &lock->old_oid, oid,
1709 logmsg, 0, &log_err)) {
1710 error("%s", log_err.buf);
1711 strbuf_release(&log_err);
1712 }
1713 }
1714 }
1715
1716 if (commit_ref(lock)) {
1717 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
1718 unlock_ref(lock);
1719 return -1;
1720 }
1721
1722 unlock_ref(lock);
1723 return 0;
1724}
1725
1726static int create_ref_symlink(struct ref_lock *lock, const char *target)
1727{
1728 int ret = -1;
1729#ifndef NO_SYMLINK_HEAD
1730 char *ref_path = get_locked_file_path(lock->lk);
1731 unlink(ref_path);
1732 ret = symlink(target, ref_path);
1733 free(ref_path);
1734
1735 if (ret)
1736 fprintf(stderr, "no symlink - falling back to symbolic ref\n");
1737#endif
1738 return ret;
1739}
1740
1741static void update_symref_reflog(struct files_ref_store *refs,
1742 struct ref_lock *lock, const char *refname,
1743 const char *target, const char *logmsg)
1744{
1745 struct strbuf err = STRBUF_INIT;
1746 struct object_id new_oid;
1747 if (logmsg &&
1748 !refs_read_ref_full(&refs->base, target,
1749 RESOLVE_REF_READING, new_oid.hash, NULL) &&
1750 files_log_ref_write(refs, refname, &lock->old_oid,
1751 &new_oid, logmsg, 0, &err)) {
1752 error("%s", err.buf);
1753 strbuf_release(&err);
1754 }
1755}
1756
1757static int create_symref_locked(struct files_ref_store *refs,
1758 struct ref_lock *lock, const char *refname,
1759 const char *target, const char *logmsg)
1760{
1761 if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
1762 update_symref_reflog(refs, lock, refname, target, logmsg);
1763 return 0;
1764 }
1765
1766 if (!fdopen_lock_file(lock->lk, "w"))
1767 return error("unable to fdopen %s: %s",
1768 lock->lk->tempfile.filename.buf, strerror(errno));
1769
1770 update_symref_reflog(refs, lock, refname, target, logmsg);
1771
1772 /* no error check; commit_ref will check ferror */
1773 fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
1774 if (commit_ref(lock) < 0)
1775 return error("unable to write symref for %s: %s", refname,
1776 strerror(errno));
1777 return 0;
1778}
1779
1780static int files_create_symref(struct ref_store *ref_store,
1781 const char *refname, const char *target,
1782 const char *logmsg)
1783{
1784 struct files_ref_store *refs =
1785 files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1786 struct strbuf err = STRBUF_INIT;
1787 struct ref_lock *lock;
1788 int ret;
1789
1790 lock = lock_ref_sha1_basic(refs, refname, NULL,
1791 NULL, NULL, REF_NODEREF, NULL,
1792 &err);
1793 if (!lock) {
1794 error("%s", err.buf);
1795 strbuf_release(&err);
1796 return -1;
1797 }
1798
1799 ret = create_symref_locked(refs, lock, refname, target, logmsg);
1800 unlock_ref(lock);
1801 return ret;
1802}
1803
1804static int files_reflog_exists(struct ref_store *ref_store,
1805 const char *refname)
1806{
1807 struct files_ref_store *refs =
1808 files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1809 struct strbuf sb = STRBUF_INIT;
1810 struct stat st;
1811 int ret;
1812
1813 files_reflog_path(refs, &sb, refname);
1814 ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
1815 strbuf_release(&sb);
1816 return ret;
1817}
1818
1819static int files_delete_reflog(struct ref_store *ref_store,
1820 const char *refname)
1821{
1822 struct files_ref_store *refs =
1823 files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1824 struct strbuf sb = STRBUF_INIT;
1825 int ret;
1826
1827 files_reflog_path(refs, &sb, refname);
1828 ret = remove_path(sb.buf);
1829 strbuf_release(&sb);
1830 return ret;
1831}
1832
1833static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
1834{
1835 struct object_id ooid, noid;
1836 char *email_end, *message;
1837 timestamp_t timestamp;
1838 int tz;
1839 const char *p = sb->buf;
1840
1841 /* old SP new SP name <email> SP time TAB msg LF */
1842 if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
1843 parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
1844 parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
1845 !(email_end = strchr(p, '>')) ||
1846 email_end[1] != ' ' ||
1847 !(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
1848 !message || message[0] != ' ' ||
1849 (message[1] != '+' && message[1] != '-') ||
1850 !isdigit(message[2]) || !isdigit(message[3]) ||
1851 !isdigit(message[4]) || !isdigit(message[5]))
1852 return 0; /* corrupt? */
1853 email_end[1] = '\0';
1854 tz = strtol(message + 1, NULL, 10);
1855 if (message[6] != '\t')
1856 message += 6;
1857 else
1858 message += 7;
1859 return fn(&ooid, &noid, p, timestamp, tz, message, cb_data);
1860}
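/*
 * A well-formed reflog entry therefore looks like this (hashes,
 * identity and message are placeholders; the separator before the
 * message is a TAB):
 *
 * 0000000000000000000000000000000000000000 3f786850e387550fdab836ed7e6dc881de23001b A U Thor <author@example.com> 1507123200 +0200	commit (initial): frotz
 */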
1861
1862static char *find_beginning_of_line(char *bob, char *scan)
1863{
1864 while (bob < scan && *(--scan) != '\n')
1865 ; /* keep scanning backwards */
1866 /*
1867 * Return either the beginning of the buffer, or the LF at the end of
1868 * the previous line.
1869 */
1870 return scan;
1871}
1872
1873static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1874 const char *refname,
1875 each_reflog_ent_fn fn,
1876 void *cb_data)
1877{
1878 struct files_ref_store *refs =
1879 files_downcast(ref_store, REF_STORE_READ,
1880 "for_each_reflog_ent_reverse");
1881 struct strbuf sb = STRBUF_INIT;
1882 FILE *logfp;
1883 long pos;
1884 int ret = 0, at_tail = 1;
1885
1886 files_reflog_path(refs, &sb, refname);
1887 logfp = fopen(sb.buf, "r");
1888 strbuf_release(&sb);
1889 if (!logfp)
1890 return -1;
1891
1892 /* Jump to the end */
1893 if (fseek(logfp, 0, SEEK_END) < 0)
1894 ret = error("cannot seek back reflog for %s: %s",
1895 refname, strerror(errno));
1896 pos = ftell(logfp);
1897 while (!ret && 0 < pos) {
1898 int cnt;
1899 size_t nread;
1900 char buf[BUFSIZ];
1901 char *endp, *scanp;
1902
1903 /* Fill next block from the end */
1904 cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
1905 if (fseek(logfp, pos - cnt, SEEK_SET)) {
1906 ret = error("cannot seek back reflog for %s: %s",
1907 refname, strerror(errno));
1908 break;
1909 }
1910 nread = fread(buf, cnt, 1, logfp);
1911 if (nread != 1) {
1912 ret = error("cannot read %d bytes from reflog for %s: %s",
1913 cnt, refname, strerror(errno));
1914 break;
1915 }
1916 pos -= cnt;
1917
1918 scanp = endp = buf + cnt;
1919 if (at_tail && scanp[-1] == '\n')
1920 /* Looking at the final LF at the end of the file */
1921 scanp--;
1922 at_tail = 0;
1923
1924 while (buf < scanp) {
1925 /*
1926 * terminating LF of the previous line, or the beginning
1927 * of the buffer.
1928 */
1929 char *bp;
1930
1931 bp = find_beginning_of_line(buf, scanp);
1932
1933 if (*bp == '\n') {
1934 /*
1935 * The newline is the end of the previous line,
1936 * so we know we have a complete line starting
1937 * at (bp + 1). Prefix it onto any prior data
1938 * we collected for the line and process it.
1939 */
1940 strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
1941 scanp = bp;
1942 endp = bp + 1;
1943 ret = show_one_reflog_ent(&sb, fn, cb_data);
1944 strbuf_reset(&sb);
1945 if (ret)
1946 break;
1947 } else if (!pos) {
1948 /*
1949 * We are at the start of the buffer, and the
1950 * start of the file; there is no previous
1951 * line, and we have everything for this one.
1952 * Process it, and we can end the loop.
1953 */
1954 strbuf_splice(&sb, 0, 0, buf, endp - buf);
1955 ret = show_one_reflog_ent(&sb, fn, cb_data);
1956 strbuf_reset(&sb);
1957 break;
1958 }
1959
1960 if (bp == buf) {
1961 /*
1962 * We are at the start of the buffer, and there
1963 * is more file to read backwards. Which means
1964 * we are in the middle of a line. Note that we
1965 * may get here even if *bp was a newline; that
1966 * just means we are at the exact end of the
1967 * previous line, rather than some spot in the
1968 * middle.
1969 *
1970 * Save away what we have to be combined with
1971 * the data from the next read.
1972 */
1973 strbuf_splice(&sb, 0, 0, buf, endp - buf);
1974 break;
1975 }
1976 }
1977
1978 }
1979 if (!ret && sb.len)
1980 die("BUG: reverse reflog parser had leftover data");
1981
1982 fclose(logfp);
1983 strbuf_release(&sb);
1984 return ret;
1985}
1986
1987static int files_for_each_reflog_ent(struct ref_store *ref_store,
1988 const char *refname,
1989 each_reflog_ent_fn fn, void *cb_data)
1990{
1991 struct files_ref_store *refs =
1992 files_downcast(ref_store, REF_STORE_READ,
1993 "for_each_reflog_ent");
1994 FILE *logfp;
1995 struct strbuf sb = STRBUF_INIT;
1996 int ret = 0;
1997
1998 files_reflog_path(refs, &sb, refname);
1999 logfp = fopen(sb.buf, "r");
2000 strbuf_release(&sb);
2001 if (!logfp)
2002 return -1;
2003
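	/* Read oldest-first, one line per entry, until fn returns non-zero. */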
2004 while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
2005 ret = show_one_reflog_ent(&sb, fn, cb_data);
2006 fclose(logfp);
2007 strbuf_release(&sb);
2008 return ret;
2009}
2010
2011struct files_reflog_iterator {
2012 struct ref_iterator base;
2013
2014 struct ref_store *ref_store;
2015 struct dir_iterator *dir_iterator;
2016 struct object_id oid;
2017};
2018
2019static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
2020{
2021 struct files_reflog_iterator *iter =
2022 (struct files_reflog_iterator *)ref_iterator;
2023 struct dir_iterator *diter = iter->dir_iterator;
2024 int ok;
2025
2026 while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
2027 int flags;
2028
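		/*
		 * Skip anything that is not a reflog: non-regular files,
		 * hidden files, and "*.lock" lock files.
		 */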
2029 if (!S_ISREG(diter->st.st_mode))
2030 continue;
2031 if (diter->basename[0] == '.')
2032 continue;
2033 if (ends_with(diter->basename, ".lock"))
2034 continue;
2035
2036 if (refs_read_ref_full(iter->ref_store,
2037 diter->relative_path, 0,
2038 iter->oid.hash, &flags)) {
2039 error("bad ref for %s", diter->path.buf);
2040 continue;
2041 }
2042
2043 iter->base.refname = diter->relative_path;
2044 iter->base.oid = &iter->oid;
2045 iter->base.flags = flags;
2046 return ITER_OK;
2047 }
2048
2049 iter->dir_iterator = NULL;
2050 if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
2051 ok = ITER_ERROR;
2052 return ok;
2053}
2054
2055static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
2056 struct object_id *peeled)
2057{
2058 die("BUG: ref_iterator_peel() called for reflog_iterator");
2059}
2060
2061static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
2062{
2063 struct files_reflog_iterator *iter =
2064 (struct files_reflog_iterator *)ref_iterator;
2065 int ok = ITER_DONE;
2066
2067 if (iter->dir_iterator)
2068 ok = dir_iterator_abort(iter->dir_iterator);
2069
2070 base_ref_iterator_free(ref_iterator);
2071 return ok;
2072}
2073
2074static struct ref_iterator_vtable files_reflog_iterator_vtable = {
2075 files_reflog_iterator_advance,
2076 files_reflog_iterator_peel,
2077 files_reflog_iterator_abort
2078};
2079
2080static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
2081{
2082 struct files_ref_store *refs =
2083 files_downcast(ref_store, REF_STORE_READ,
2084 "reflog_iterator_begin");
2085 struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
2086 struct ref_iterator *ref_iterator = &iter->base;
2087 struct strbuf sb = STRBUF_INIT;
2088
2089 base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
2090 files_reflog_path(refs, &sb, NULL);
2091 iter->dir_iterator = dir_iterator_begin(sb.buf);
2092 iter->ref_store = ref_store;
2093 strbuf_release(&sb);
2094 return ref_iterator;
2095}
2096
2097/*
2098 * If update is a direct update of head_ref (the reference pointed to
2099 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
2100 */
2101static int split_head_update(struct ref_update *update,
2102 struct ref_transaction *transaction,
2103 const char *head_ref,
2104 struct string_list *affected_refnames,
2105 struct strbuf *err)
2106{
2107 struct string_list_item *item;
2108 struct ref_update *new_update;
2109
2110 if ((update->flags & REF_LOG_ONLY) ||
2111 (update->flags & REF_ISPRUNING) ||
2112 (update->flags & REF_UPDATE_VIA_HEAD))
2113 return 0;
2114
2115 if (strcmp(update->refname, head_ref))
2116 return 0;
2117
2118 /*
2119 * First make sure that HEAD is not already in the
2120 * transaction. This insertion is O(N) in the transaction
2121 * size, but it happens at most once per transaction.
2122 */
2123 item = string_list_insert(affected_refnames, "HEAD");
2124 if (item->util) {
2125 /* An entry already existed */
2126 strbuf_addf(err,
2127 "multiple updates for 'HEAD' (including one "
2128 "via its referent '%s') are not allowed",
2129 update->refname);
2130 return TRANSACTION_NAME_CONFLICT;
2131 }
2132
2133 new_update = ref_transaction_add_update(
2134 transaction, "HEAD",
2135 update->flags | REF_LOG_ONLY | REF_NODEREF,
2136 update->new_oid.hash, update->old_oid.hash,
2137 update->msg);
2138
2139 item->util = new_update;
2140
2141 return 0;
2142}
2143
2144/*
2145 * update is for a symref that points at referent and doesn't have
2146 * REF_NODEREF set. Split it into two updates:
2147 * - The original update, but with REF_LOG_ONLY and REF_NODEREF set
2148 * - A new, separate update for the referent reference
2149 * Note that the new update will itself be subject to splitting when
2150 * the iteration gets to it.
2151 */
2152static int split_symref_update(struct files_ref_store *refs,
2153 struct ref_update *update,
2154 const char *referent,
2155 struct ref_transaction *transaction,
2156 struct string_list *affected_refnames,
2157 struct strbuf *err)
2158{
2159 struct string_list_item *item;
2160 struct ref_update *new_update;
2161 unsigned int new_flags;
2162
2163 /*
2164 * First make sure that referent is not already in the
2165 * transaction. This insertion is O(N) in the transaction
2166 * size, but it happens at most once per symref in a
2167 * transaction.
2168 */
2169 item = string_list_insert(affected_refnames, referent);
2170 if (item->util) {
2171 /* An entry already existed */
2172 strbuf_addf(err,
2173 "multiple updates for '%s' (including one "
2174 "via symref '%s') are not allowed",
2175 referent, update->refname);
2176 return TRANSACTION_NAME_CONFLICT;
2177 }
2178
2179 new_flags = update->flags;
2180 if (!strcmp(update->refname, "HEAD")) {
2181 /*
2182 * Record that the new update came via HEAD, so that
2183 * when we process it, split_head_update() doesn't try
2184 * to add another reflog update for HEAD. Note that
2185 * this bit will be propagated if the new_update
2186 * itself needs to be split.
2187 */
2188 new_flags |= REF_UPDATE_VIA_HEAD;
2189 }
2190
2191 new_update = ref_transaction_add_update(
2192 transaction, referent, new_flags,
2193 update->new_oid.hash, update->old_oid.hash,
2194 update->msg);
2195
2196 new_update->parent_update = update;
2197
2198 /*
2199 * Change the symbolic ref update to log only. Also, it
2200 * doesn't need to check its old SHA-1 value, as that will be
2201 * done when new_update is processed.
2202 */
2203 update->flags |= REF_LOG_ONLY | REF_NODEREF;
2204 update->flags &= ~REF_HAVE_OLD;
2205
2206 item->util = new_update;
2207
2208 return 0;
2209}
2210
2211/*
2212 * Return the refname under which update was originally requested.
2213 */
2214static const char *original_update_refname(struct ref_update *update)
2215{
2216 while (update->parent_update)
2217 update = update->parent_update;
2218
2219 return update->refname;
2220}
2221
2222/*
2223 * Check whether the REF_HAVE_OLD and old_oid values stored in update
2224 * are consistent with oid, which is the reference's current value. If
2225 * everything is OK, return 0; otherwise, write an error message to
2226 * err and return -1.
2227 */
2228static int check_old_oid(struct ref_update *update, struct object_id *oid,
2229 struct strbuf *err)
2230{
2231 if (!(update->flags & REF_HAVE_OLD) ||
2232 !oidcmp(oid, &update->old_oid))
2233 return 0;
2234
2235 if (is_null_oid(&update->old_oid))
2236 strbuf_addf(err, "cannot lock ref '%s': "
2237 "reference already exists",
2238 original_update_refname(update));
2239 else if (is_null_oid(oid))
2240 strbuf_addf(err, "cannot lock ref '%s': "
2241 "reference is missing but expected %s",
2242 original_update_refname(update),
2243 oid_to_hex(&update->old_oid));
2244 else
2245 strbuf_addf(err, "cannot lock ref '%s': "
2246 "is at %s but expected %s",
2247 original_update_refname(update),
2248 oid_to_hex(oid),
2249 oid_to_hex(&update->old_oid));
2250
2251 return -1;
2252}
2253
2254/*
2255 * Prepare for carrying out update:
2256 * - Lock the reference referred to by update.
2257 * - Read the reference under lock.
2258 * - Check that its old SHA-1 value (if specified) is correct, and in
2259 * any case record it in update->lock->old_oid for later use when
2260 * writing the reflog.
2261 * - If it is a symref update without REF_NODEREF, split it up into a
2262 * REF_LOG_ONLY update of the symref and add a separate update for
2263 * the referent to transaction.
2264 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2265 * update of HEAD.
2266 */
2267static int lock_ref_for_update(struct files_ref_store *refs,
2268 struct ref_update *update,
2269 struct ref_transaction *transaction,
2270 const char *head_ref,
2271 struct string_list *affected_refnames,
2272 struct strbuf *err)
2273{
2274 struct strbuf referent = STRBUF_INIT;
2275 int mustexist = (update->flags & REF_HAVE_OLD) &&
2276 !is_null_oid(&update->old_oid);
2277 int ret;
2278 struct ref_lock *lock;
2279
2280 files_assert_main_repository(refs, "lock_ref_for_update");
2281
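	/* A new value of all zeros is a request to delete the reference. */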
2282 if ((update->flags & REF_HAVE_NEW) && is_null_oid(&update->new_oid))
2283 update->flags |= REF_DELETING;
2284
2285 if (head_ref) {
2286 ret = split_head_update(update, transaction, head_ref,
2287 affected_refnames, err);
2288 if (ret)
2289 return ret;
2290 }
2291
2292 ret = lock_raw_ref(refs, update->refname, mustexist,
2293 affected_refnames, NULL,
2294 &lock, &referent,
2295 &update->type, err);
2296 if (ret) {
2297 char *reason;
2298
2299 reason = strbuf_detach(err, NULL);
2300 strbuf_addf(err, "cannot lock ref '%s': %s",
2301 original_update_refname(update), reason);
2302 free(reason);
2303 return ret;
2304 }
2305
2306 update->backend_data = lock;
2307
2308 if (update->type & REF_ISSYMREF) {
2309 if (update->flags & REF_NODEREF) {
2310 /*
2311 * We won't be reading the referent as part of
2312 * the transaction, so we have to read it here
2313 * to record and possibly check old_sha1:
2314 */
2315 if (refs_read_ref_full(&refs->base,
2316 referent.buf, 0,
2317 lock->old_oid.hash, NULL)) {
2318 if (update->flags & REF_HAVE_OLD) {
2319 strbuf_addf(err, "cannot lock ref '%s': "
2320 "error reading reference",
2321 original_update_refname(update));
2322 return -1;
2323 }
2324 } else if (check_old_oid(update, &lock->old_oid, err)) {
2325 return TRANSACTION_GENERIC_ERROR;
2326 }
2327 } else {
2328 /*
2329 * Create a new update for the reference this
2330 * symref is pointing at. Also, we will record
2331 * and verify old_sha1 for this update as part
2332 * of processing the split-off update, so we
2333 * don't have to do it here.
2334 */
2335 ret = split_symref_update(refs, update,
2336 referent.buf, transaction,
2337 affected_refnames, err);
2338 if (ret)
2339 return ret;
2340 }
2341 } else {
2342 struct ref_update *parent_update;
2343
2344 if (check_old_oid(update, &lock->old_oid, err))
2345 return TRANSACTION_GENERIC_ERROR;
2346
2347 /*
2348 * If this update is happening indirectly because of a
2349 * symref update, record the old SHA-1 in the parent
2350 * update:
2351 */
2352 for (parent_update = update->parent_update;
2353 parent_update;
2354 parent_update = parent_update->parent_update) {
2355 struct ref_lock *parent_lock = parent_update->backend_data;
2356 oidcpy(&parent_lock->old_oid, &lock->old_oid);
2357 }
2358 }
2359
2360 if ((update->flags & REF_HAVE_NEW) &&
2361 !(update->flags & REF_DELETING) &&
2362 !(update->flags & REF_LOG_ONLY)) {
2363 if (!(update->type & REF_ISSYMREF) &&
2364 !oidcmp(&lock->old_oid, &update->new_oid)) {
2365 /*
2366 * The reference already has the desired
2367 * value, so we don't need to write it.
2368 */
2369 } else if (write_ref_to_lockfile(lock, &update->new_oid,
2370 err)) {
2371 char *write_err = strbuf_detach(err, NULL);
2372
2373 /*
2374 * The lock was freed upon failure of
2375 * write_ref_to_lockfile():
2376 */
2377 update->backend_data = NULL;
2378 strbuf_addf(err,
2379 "cannot update ref '%s': %s",
2380 update->refname, write_err);
2381 free(write_err);
2382 return TRANSACTION_GENERIC_ERROR;
2383 } else {
2384 update->flags |= REF_NEEDS_COMMIT;
2385 }
2386 }
2387 if (!(update->flags & REF_NEEDS_COMMIT)) {
2388 /*
2389 * We didn't call write_ref_to_lockfile(), so
2390 * the lockfile is still open. Close it to
2391 * free up the file descriptor:
2392 */
2393 if (close_ref(lock)) {
2394 strbuf_addf(err, "couldn't close '%s.lock'",
2395 update->refname);
2396 return TRANSACTION_GENERIC_ERROR;
2397 }
2398 }
2399 return 0;
2400}
2401
2402struct files_transaction_backend_data {
2403 struct ref_transaction *packed_transaction;
2404 int packed_refs_locked;
2405};
2406
2407/*
2408 * Unlock any references in `transaction` that are still locked, and
2409 * mark the transaction closed.
2410 */
2411static void files_transaction_cleanup(struct files_ref_store *refs,
2412 struct ref_transaction *transaction)
2413{
2414 size_t i;
2415 struct files_transaction_backend_data *backend_data =
2416 transaction->backend_data;
2417 struct strbuf err = STRBUF_INIT;
2418
2419 for (i = 0; i < transaction->nr; i++) {
2420 struct ref_update *update = transaction->updates[i];
2421 struct ref_lock *lock = update->backend_data;
2422
2423 if (lock) {
2424 unlock_ref(lock);
2425 update->backend_data = NULL;
2426 }
2427 }
2428
2429 if (backend_data->packed_transaction &&
2430 ref_transaction_abort(backend_data->packed_transaction, &err)) {
2431 error("error aborting transaction: %s", err.buf);
2432 strbuf_release(&err);
2433 }
2434
2435 if (backend_data->packed_refs_locked)
2436 packed_refs_unlock(refs->packed_ref_store);
2437
2438 free(backend_data);
2439
2440 transaction->state = REF_TRANSACTION_CLOSED;
2441}
2442
2443static int files_transaction_prepare(struct ref_store *ref_store,
2444 struct ref_transaction *transaction,
2445 struct strbuf *err)
2446{
2447 struct files_ref_store *refs =
2448 files_downcast(ref_store, REF_STORE_WRITE,
2449 "ref_transaction_prepare");
2450 size_t i;
2451 int ret = 0;
2452 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2453 char *head_ref = NULL;
2454 int head_type;
2455 struct object_id head_oid;
2456 struct files_transaction_backend_data *backend_data;
2457 struct ref_transaction *packed_transaction = NULL;
2458
2459 assert(err);
2460
2461 if (!transaction->nr)
2462 goto cleanup;
2463
2464 backend_data = xcalloc(1, sizeof(*backend_data));
2465 transaction->backend_data = backend_data;
2466
2467 /*
2468 * Fail if a refname appears more than once in the
2469 * transaction. (If we end up splitting up any updates using
2470 * split_symref_update() or split_head_update(), those
2471 * functions will check that the new updates don't have the
2472 * same refname as any existing ones.)
2473 */
2474 for (i = 0; i < transaction->nr; i++) {
2475 struct ref_update *update = transaction->updates[i];
2476 struct string_list_item *item =
2477 string_list_append(&affected_refnames, update->refname);
2478
2479 /*
2480 * We store a pointer to update in item->util, but at
2481 * the moment we never use the value of this field
2482 * except to check whether it is non-NULL.
2483 */
2484 item->util = update;
2485 }
2486 string_list_sort(&affected_refnames);
2487 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2488 ret = TRANSACTION_GENERIC_ERROR;
2489 goto cleanup;
2490 }
2491
2492 /*
2493 * Special hack: If a branch is updated directly and HEAD
2494 * points to it (may happen on the remote side of a push
2495 * for example) then logically the HEAD reflog should be
2496 * updated too.
2497 *
2498 * A generic solution would require reverse symref lookups,
2499 * but finding all symrefs pointing to a given branch would
2500 * be too costly to be worth it for this rare event (the direct
2501 * update of a branch). So let's cheat and check with HEAD
2502 * only, which should cover 99% of all usage scenarios (even
2503 * 100% of the default ones).
2504 *
2505 * So if HEAD is a symbolic reference, then record the name of
2506 * the reference that it points to. If we see an update of
2507 * head_ref within the transaction, then split_head_update()
2508 * arranges for the reflog of HEAD to be updated, too.
2509 */
2510 head_ref = refs_resolve_refdup(ref_store, "HEAD",
2511 RESOLVE_REF_NO_RECURSE,
2512 head_oid.hash, &head_type);
2513
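	/*
	 * If HEAD is detached (not a symref), there is no referent whose
	 * update would need to be mirrored into HEAD's reflog.
	 */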
2514 if (head_ref && !(head_type & REF_ISSYMREF)) {
2515 FREE_AND_NULL(head_ref);
2516 }
2517
2518 /*
2519 * Acquire all locks, verify old values if provided, check
2520 * that new values are valid, and write new values to the
2521 * lockfiles, ready to be activated. Only keep one lockfile
2522 * open at a time to avoid running out of file descriptors.
2523 * Note that lock_ref_for_update() might append more updates
2524 * to the transaction.
2525 */
2526 for (i = 0; i < transaction->nr; i++) {
2527 struct ref_update *update = transaction->updates[i];
2528
2529 ret = lock_ref_for_update(refs, update, transaction,
2530 head_ref, &affected_refnames, err);
2531 if (ret)
2532 break;
2533
2534 if (update->flags & REF_DELETING &&
2535 !(update->flags & REF_LOG_ONLY) &&
2536 !(update->flags & REF_ISPRUNING)) {
2537 /*
2538 * This reference has to be deleted from
2539 * packed-refs if it exists there.
2540 */
2541 if (!packed_transaction) {
2542 packed_transaction = ref_store_transaction_begin(
2543 refs->packed_ref_store, err);
2544 if (!packed_transaction) {
2545 ret = TRANSACTION_GENERIC_ERROR;
2546 goto cleanup;
2547 }
2548
2549 backend_data->packed_transaction =
2550 packed_transaction;
2551 }
2552
2553 ref_transaction_add_update(
2554 packed_transaction, update->refname,
2555 update->flags & ~REF_HAVE_OLD,
2556 update->new_oid.hash, update->old_oid.hash,
2557 NULL);
2558 }
2559 }
2560
2561 if (packed_transaction) {
2562 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2563 ret = TRANSACTION_GENERIC_ERROR;
2564 goto cleanup;
2565 }
2566 backend_data->packed_refs_locked = 1;
2567 ret = ref_transaction_prepare(packed_transaction, err);
2568 }
2569
2570cleanup:
2571 free(head_ref);
2572 string_list_clear(&affected_refnames, 0);
2573
2574 if (ret)
2575 files_transaction_cleanup(refs, transaction);
2576 else
2577 transaction->state = REF_TRANSACTION_PREPARED;
2578
2579 return ret;
2580}
2581
2582static int files_transaction_finish(struct ref_store *ref_store,
2583 struct ref_transaction *transaction,
2584 struct strbuf *err)
2585{
2586 struct files_ref_store *refs =
2587 files_downcast(ref_store, 0, "ref_transaction_finish");
2588 size_t i;
2589 int ret = 0;
2590 struct strbuf sb = STRBUF_INIT;
2591 struct files_transaction_backend_data *backend_data;
2592 struct ref_transaction *packed_transaction;
2593
2594
2595 assert(err);
2596
2597 if (!transaction->nr) {
2598 transaction->state = REF_TRANSACTION_CLOSED;
2599 return 0;
2600 }
2601
2602 backend_data = transaction->backend_data;
2603 packed_transaction = backend_data->packed_transaction;
2604
2605 /* Perform updates first so live commits remain referenced */
2606 for (i = 0; i < transaction->nr; i++) {
2607 struct ref_update *update = transaction->updates[i];
2608 struct ref_lock *lock = update->backend_data;
2609
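		/*
		 * Write the reflog entry first (for both real updates and
		 * the REF_LOG_ONLY updates split off earlier), then rename
		 * the lockfile into place for the real updates.
		 */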
2610 if (update->flags & REF_NEEDS_COMMIT ||
2611 update->flags & REF_LOG_ONLY) {
2612 if (files_log_ref_write(refs,
2613 lock->ref_name,
2614 &lock->old_oid,
2615 &update->new_oid,
2616 update->msg, update->flags,
2617 err)) {
2618 char *old_msg = strbuf_detach(err, NULL);
2619
2620 strbuf_addf(err, "cannot update the ref '%s': %s",
2621 lock->ref_name, old_msg);
2622 free(old_msg);
2623 unlock_ref(lock);
2624 update->backend_data = NULL;
2625 ret = TRANSACTION_GENERIC_ERROR;
2626 goto cleanup;
2627 }
2628 }
2629 if (update->flags & REF_NEEDS_COMMIT) {
2630 clear_loose_ref_cache(refs);
2631 if (commit_ref(lock)) {
2632 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
2633 unlock_ref(lock);
2634 update->backend_data = NULL;
2635 ret = TRANSACTION_GENERIC_ERROR;
2636 goto cleanup;
2637 }
2638 }
2639 }
2640
2641 /*
2642 * Now that updates are safely completed, we can perform
2643 * deletes. First delete the reflogs of any references that
2644 * will be deleted, since (in the unexpected event of an
2645 * error) leaving a reference without a reflog is less bad
2646 * than leaving a reflog without a reference (the latter is a
2647 * mildly invalid repository state):
2648 */
2649 for (i = 0; i < transaction->nr; i++) {
2650 struct ref_update *update = transaction->updates[i];
2651 if (update->flags & REF_DELETING &&
2652 !(update->flags & REF_LOG_ONLY) &&
2653 !(update->flags & REF_ISPRUNING)) {
2654 strbuf_reset(&sb);
2655 files_reflog_path(refs, &sb, update->refname);
2656 if (!unlink_or_warn(sb.buf))
2657 try_remove_empty_parents(refs, update->refname,
2658 REMOVE_EMPTY_PARENTS_REFLOG);
2659 }
2660 }
2661
2662 /*
2663 * Perform deletes now that updates are safely completed.
2664 *
2665 * First delete any packed versions of the references, while
2666 * retaining the packed-refs lock:
2667 */
2668 if (packed_transaction) {
2669 ret = ref_transaction_commit(packed_transaction, err);
2670 ref_transaction_free(packed_transaction);
2671 packed_transaction = NULL;
2672 backend_data->packed_transaction = NULL;
2673 if (ret)
2674 goto cleanup;
2675 }
2676
2677 /* Now delete the loose versions of the references: */
2678 for (i = 0; i < transaction->nr; i++) {
2679 struct ref_update *update = transaction->updates[i];
2680 struct ref_lock *lock = update->backend_data;
2681
2682 if (update->flags & REF_DELETING &&
2683 !(update->flags & REF_LOG_ONLY)) {
2684 if (!(update->type & REF_ISPACKED) ||
2685 update->type & REF_ISSYMREF) {
2686 /* It is a loose reference. */
2687 strbuf_reset(&sb);
2688 files_ref_path(refs, &sb, lock->ref_name);
2689 if (unlink_or_msg(sb.buf, err)) {
2690 ret = TRANSACTION_GENERIC_ERROR;
2691 goto cleanup;
2692 }
2693 update->flags |= REF_DELETED_LOOSE;
2694 }
2695 }
2696 }
2697
2698 clear_loose_ref_cache(refs);
2699
2700cleanup:
2701 files_transaction_cleanup(refs, transaction);
2702
2703 for (i = 0; i < transaction->nr; i++) {
2704 struct ref_update *update = transaction->updates[i];
2705
2706 if (update->flags & REF_DELETED_LOOSE) {
2707 /*
2708 * The loose reference was deleted. Delete any
2709 * empty parent directories. (Note that this
2710 * can only work because we have already
2711 * removed the lockfile.)
2712 */
2713 try_remove_empty_parents(refs, update->refname,
2714 REMOVE_EMPTY_PARENTS_REF);
2715 }
2716 }
2717
2718 strbuf_release(&sb);
2719 return ret;
2720}
2721
2722static int files_transaction_abort(struct ref_store *ref_store,
2723 struct ref_transaction *transaction,
2724 struct strbuf *err)
2725{
2726 struct files_ref_store *refs =
2727 files_downcast(ref_store, 0, "ref_transaction_abort");
2728
2729 files_transaction_cleanup(refs, transaction);
2730 return 0;
2731}
2732
2733static int ref_present(const char *refname,
2734 const struct object_id *oid, int flags, void *cb_data)
2735{
2736 struct string_list *affected_refnames = cb_data;
2737
2738 return string_list_has_string(affected_refnames, refname);
2739}
2740
2741static int files_initial_transaction_commit(struct ref_store *ref_store,
2742 struct ref_transaction *transaction,
2743 struct strbuf *err)
2744{
2745 struct files_ref_store *refs =
2746 files_downcast(ref_store, REF_STORE_WRITE,
2747 "initial_ref_transaction_commit");
2748 size_t i;
2749 int ret = 0;
2750 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2751 struct ref_transaction *packed_transaction = NULL;
2752
2753 assert(err);
2754
2755 if (transaction->state != REF_TRANSACTION_OPEN)
2756 die("BUG: commit called for transaction that is not open");
2757
2758 /* Fail if a refname appears more than once in the transaction: */
2759 for (i = 0; i < transaction->nr; i++)
2760 string_list_append(&affected_refnames,
2761 transaction->updates[i]->refname);
2762 string_list_sort(&affected_refnames);
2763 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2764 ret = TRANSACTION_GENERIC_ERROR;
2765 goto cleanup;
2766 }
2767
2768 /*
2769 * It is undefined to call this function in an active
2770 * repository or when there are existing references: we only
2771 * lock and change packed-refs, so (1) other processes might
2772 * try to change a reference at the same time we do, and
2773 * (2) any existing loose versions of the references that we
2774 * are setting would take precedence over our values. But some
2775 * remote helpers create the remote "HEAD" and "master"
2776 * branches before calling this function, so here we only
2777 * check that none of the references we are creating already
2778 * exist.
2779 */
2780 if (refs_for_each_rawref(&refs->base, ref_present,
2781 &affected_refnames))
2782 die("BUG: initial ref transaction called with existing refs");
2783
2784 packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
2785 if (!packed_transaction) {
2786 ret = TRANSACTION_GENERIC_ERROR;
2787 goto cleanup;
2788 }
2789
2790 for (i = 0; i < transaction->nr; i++) {
2791 struct ref_update *update = transaction->updates[i];
2792
2793 if ((update->flags & REF_HAVE_OLD) &&
2794 !is_null_oid(&update->old_oid))
2795 die("BUG: initial ref transaction with old_sha1 set");
2796 if (refs_verify_refname_available(&refs->base, update->refname,
2797 &affected_refnames, NULL,
2798 err)) {
2799 ret = TRANSACTION_NAME_CONFLICT;
2800 goto cleanup;
2801 }
2802
2803 /*
2804 * Add a reference creation for this reference to the
2805 * packed-refs transaction:
2806 */
2807 ref_transaction_add_update(packed_transaction, update->refname,
2808 update->flags & ~REF_HAVE_OLD,
2809 update->new_oid.hash, update->old_oid.hash,
2810 NULL);
2811 }
2812
2813 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2814 ret = TRANSACTION_GENERIC_ERROR;
2815 goto cleanup;
2816 }
2817
2818 if (initial_ref_transaction_commit(packed_transaction, err)) {
2819 ret = TRANSACTION_GENERIC_ERROR;
2820 goto cleanup;
2821 }
2822
2823cleanup:
2824 if (packed_transaction)
2825 ref_transaction_free(packed_transaction);
2826 packed_refs_unlock(refs->packed_ref_store);
2827 transaction->state = REF_TRANSACTION_CLOSED;
2828 string_list_clear(&affected_refnames, 0);
2829 return ret;
2830}
2831
2832struct expire_reflog_cb {
2833 unsigned int flags;
2834 reflog_expiry_should_prune_fn *should_prune_fn;
2835 void *policy_cb;
2836 FILE *newlog;
2837 struct object_id last_kept_oid;
2838};
2839
2840static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
2841 const char *email, timestamp_t timestamp, int tz,
2842 const char *message, void *cb_data)
2843{
2844 struct expire_reflog_cb *cb = cb_data;
2845 struct expire_reflog_policy_cb *policy_cb = cb->policy_cb;
2846
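	/*
	 * With EXPIRE_REFLOGS_REWRITE, pretend this entry starts from the
	 * new value of the last entry we kept, so the surviving entries
	 * still form a connected chain.
	 */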
2847 if (cb->flags & EXPIRE_REFLOGS_REWRITE)
2848 ooid = &cb->last_kept_oid;
2849
2850 if ((*cb->should_prune_fn)(ooid, noid, email, timestamp, tz,
2851 message, policy_cb)) {
2852 if (!cb->newlog)
2853 printf("would prune %s", message);
2854 else if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
2855 printf("prune %s", message);
2856 } else {
2857 if (cb->newlog) {
2858 fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s",
2859 oid_to_hex(ooid), oid_to_hex(noid),
2860 email, timestamp, tz, message);
2861 oidcpy(&cb->last_kept_oid, noid);
2862 }
2863 if (cb->flags & EXPIRE_REFLOGS_VERBOSE)
2864 printf("keep %s", message);
2865 }
2866 return 0;
2867}
2868
2869static int files_reflog_expire(struct ref_store *ref_store,
2870 const char *refname, const unsigned char *sha1,
2871 unsigned int flags,
2872 reflog_expiry_prepare_fn prepare_fn,
2873 reflog_expiry_should_prune_fn should_prune_fn,
2874 reflog_expiry_cleanup_fn cleanup_fn,
2875 void *policy_cb_data)
2876{
2877 struct files_ref_store *refs =
2878 files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2879 static struct lock_file reflog_lock;
2880 struct expire_reflog_cb cb;
2881 struct ref_lock *lock;
2882 struct strbuf log_file_sb = STRBUF_INIT;
2883 char *log_file;
2884 int status = 0;
2885 int type;
2886 struct strbuf err = STRBUF_INIT;
2887 struct object_id oid;
2888
2889 memset(&cb, 0, sizeof(cb));
2890 cb.flags = flags;
2891 cb.policy_cb = policy_cb_data;
2892 cb.should_prune_fn = should_prune_fn;
2893
2894 /*
2895 * The reflog file is locked by holding the lock on the
2896 * reference itself, plus we might need to update the
2897 * reference if --updateref was specified:
2898 */
2899 lock = lock_ref_sha1_basic(refs, refname, sha1,
2900 NULL, NULL, REF_NODEREF,
2901 &type, &err);
2902 if (!lock) {
2903 error("cannot lock ref '%s': %s", refname, err.buf);
2904 strbuf_release(&err);
2905 return -1;
2906 }
2907 if (!refs_reflog_exists(ref_store, refname)) {
2908 unlock_ref(lock);
2909 return 0;
2910 }
2911
2912 files_reflog_path(refs, &log_file_sb, refname);
2913 log_file = strbuf_detach(&log_file_sb, NULL);
2914 if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
2915 /*
2916 * Even though holding $GIT_DIR/logs/$reflog.lock has
2917 * no locking implications, we use the lock_file
2918 * machinery here anyway because it does a lot of the
2919 * work we need, including cleaning up if the program
2920 * exits unexpectedly.
2921 */
2922 if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) {
2923 struct strbuf err = STRBUF_INIT;
2924 unable_to_lock_message(log_file, errno, &err);
2925 error("%s", err.buf);
2926 strbuf_release(&err);
2927 goto failure;
2928 }
2929 cb.newlog = fdopen_lock_file(&reflog_lock, "w");
2930 if (!cb.newlog) {
2931 error("cannot fdopen %s (%s)",
2932 get_lock_file_path(&reflog_lock), strerror(errno));
2933 goto failure;
2934 }
2935 }
2936
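	/*
	 * Walk the existing entries; in a dry run cb.newlog stays NULL, so
	 * expire_reflog_ent() only reports what it would prune.
	 */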
2937 hashcpy(oid.hash, sha1);
2938
2939 (*prepare_fn)(refname, &oid, cb.policy_cb);
2940 refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
2941 (*cleanup_fn)(cb.policy_cb);
2942
2943 if (!(flags & EXPIRE_REFLOGS_DRY_RUN)) {
2944 /*
2945 * It doesn't make sense to adjust a reference pointed
2946 * to by a symbolic ref based on expiring entries in
2947 * the symbolic reference's reflog. Nor can we update
2948 * a reference if there are no remaining reflog
2949 * entries.
2950 */
2951 int update = (flags & EXPIRE_REFLOGS_UPDATE_REF) &&
2952 !(type & REF_ISSYMREF) &&
2953 !is_null_oid(&cb.last_kept_oid);
2954
2955 if (close_lock_file(&reflog_lock)) {
2956 status |= error("couldn't write %s: %s", log_file,
2957 strerror(errno));
2958 } else if (update &&
2959 (write_in_full(get_lock_file_fd(lock->lk),
2960 oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
2961 write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 ||
2962 close_ref(lock) < 0)) {
2963 status |= error("couldn't write %s",
2964 get_lock_file_path(lock->lk));
2965 rollback_lock_file(&reflog_lock);
2966 } else if (commit_lock_file(&reflog_lock)) {
2967 status |= error("unable to write reflog '%s' (%s)",
2968 log_file, strerror(errno));
2969 } else if (update && commit_ref(lock)) {
2970 status |= error("couldn't set %s", lock->ref_name);
2971 }
2972 }
2973 free(log_file);
2974 unlock_ref(lock);
2975 return status;
2976
2977 failure:
2978 rollback_lock_file(&reflog_lock);
2979 free(log_file);
2980 unlock_ref(lock);
2981 return -1;
2982}
2983
2984static int files_init_db(struct ref_store *ref_store, struct strbuf *err)
2985{
2986 struct files_ref_store *refs =
2987 files_downcast(ref_store, REF_STORE_WRITE, "init_db");
2988 struct strbuf sb = STRBUF_INIT;
2989
2990 /*
2991 * Create .git/refs/{heads,tags}
2992 */
2993 files_ref_path(refs, &sb, "refs/heads");
2994 safe_create_dir(sb.buf, 1);
2995
2996 strbuf_reset(&sb);
2997 files_ref_path(refs, &sb, "refs/tags");
2998 safe_create_dir(sb.buf, 1);
2999
3000 strbuf_release(&sb);
3001 return 0;
3002}
3003
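/*
 * The vtable that plugs the "files" backend into the generic ref_store
 * API. The leading NULL fills the backend-list "next" pointer, and the
 * remaining entries must stay in the order declared in struct
 * ref_storage_be.
 */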
3004struct ref_storage_be refs_be_files = {
3005 NULL,
3006 "files",
3007 files_ref_store_create,
3008 files_init_db,
3009 files_transaction_prepare,
3010 files_transaction_finish,
3011 files_transaction_abort,
3012 files_initial_transaction_commit,
3013
3014 files_pack_refs,
3015 files_peel_ref,
3016 files_create_symref,
3017 files_delete_refs,
3018 files_rename_ref,
3019
3020 files_ref_iterator_begin,
3021 files_read_raw_ref,
3022
3023 files_reflog_iterator_begin,
3024 files_for_each_reflog_ent,
3025 files_for_each_reflog_ent_reverse,
3026 files_reflog_exists,
3027 files_create_reflog,
3028 files_delete_reflog,
3029 files_reflog_expire
3030};