1 #include "../git-compat-util.h"
2 #include "../config.h"
3 #include "../copy.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../refs.h"
9 #include "refs-internal.h"
10 #include "ref-cache.h"
11 #include "packed-backend.h"
12 #include "../ident.h"
13 #include "../iterator.h"
14 #include "../dir-iterator.h"
15 #include "../lockfile.h"
16 #include "../object.h"
17 #include "../object-file.h"
18 #include "../path.h"
19 #include "../dir.h"
20 #include "../chdir-notify.h"
21 #include "../setup.h"
22 #include "../worktree.h"
23 #include "../wrapper.h"
24 #include "../write-or-die.h"
25 #include "../revision.h"
26 #include "../wildmatch.h"
27
28 /*
29 * This backend uses the following flags in `ref_update::flags` for
30 * internal bookkeeping purposes. Their numerical values must not
31 * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW,
32 * or REF_HAVE_OLD, which are also stored in `ref_update::flags`.
33 */
34
35 /*
36 * Used as a flag in ref_update::flags when a loose ref is being
37 * pruned. This flag must only be used when REF_NO_DEREF is set.
38 */
39 #define REF_IS_PRUNING (1 << 4)
40
41 /*
42 * Used as a flag in ref_update::flags telling the ref-locking code to tolerate
43 * broken refs (i.e., because the reference is about to be deleted anyway).
44 */
45 #define REF_DELETING (1 << 5)
46
47 /*
48 * Used as a flag in ref_update::flags when the lockfile needs to be
49 * committed.
50 */
51 #define REF_NEEDS_COMMIT (1 << 6)
52
53 /*
54 * Used as a flag in ref_update::flags when the ref_update was via an
55 * update to HEAD.
56 */
57 #define REF_UPDATE_VIA_HEAD (1 << 8)
58
59 /*
60 * Used as a flag in ref_update::flags when a reference has been
61 * deleted and the ref's parent directories may need cleanup.
62 */
63 #define REF_DELETED_RMDIR (1 << 9)
64
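/*
 * Illustrative note on the bit layout (assuming the definitions in
 * refs.h and refs-internal.h): the flags above start at (1 << 4)
 * because the lower bits of `ref_update::flags` carry the generic
 * flags named in the block comment above (REF_NO_DEREF,
 * REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW, REF_HAVE_OLD), and the gap
 * at (1 << 7) is believed to be left for REF_LOG_ONLY from
 * refs-internal.h; consult those headers for the authoritative
 * values rather than this note.
 */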
65 struct ref_lock {
66 char *ref_name;
67 struct lock_file lk;
68 struct object_id old_oid;
69 };
70
71 struct files_ref_store {
72 struct ref_store base;
73 unsigned int store_flags;
74
75 char *gitcommondir;
76
77 struct ref_cache *loose;
78
79 struct ref_store *packed_ref_store;
80 };
81
82 static void clear_loose_ref_cache(struct files_ref_store *refs)
83 {
84 if (refs->loose) {
85 free_ref_cache(refs->loose);
86 refs->loose = NULL;
87 }
88 }
89
90 /*
91 * Create a new files-backend ref store for the given repository and
92 * gitdir; packed refs are read from the repository's common git dir.
93 */
94 static struct ref_store *files_ref_store_create(struct repository *repo,
95 const char *gitdir,
96 unsigned int flags)
97 {
98 struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
99 struct ref_store *ref_store = (struct ref_store *)refs;
100 struct strbuf sb = STRBUF_INIT;
101
102 base_ref_store_init(ref_store, repo, gitdir, &refs_be_files);
103 refs->store_flags = flags;
104 get_common_dir_noenv(&sb, gitdir);
105 refs->gitcommondir = strbuf_detach(&sb, NULL);
106 refs->packed_ref_store =
107 packed_ref_store_create(repo, refs->gitcommondir, flags);
108
109 chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir);
110 chdir_notify_reparent("files-backend $GIT_COMMONDIR",
111 &refs->gitcommondir);
112
113 return ref_store;
114 }
115
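/*
 * Note: callers are not expected to invoke files_ref_store_create()
 * directly; the files backend is normally reached through the generic
 * ref-store API (get_main_ref_store() and the refs_* wrappers), which
 * dispatches through the `refs_be_files` vtable defined by this
 * backend.
 */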
116 /*
117 * Die if refs is not the main ref store. caller is used in any
118 * necessary error messages.
119 */
120 static void files_assert_main_repository(struct files_ref_store *refs,
121 const char *caller)
122 {
123 if (refs->store_flags & REF_STORE_MAIN)
124 return;
125
126 BUG("operation %s only allowed for main ref store", caller);
127 }
128
129 /*
130 * Downcast ref_store to files_ref_store. Die if ref_store is not a
131 * files_ref_store. required_flags is compared with ref_store's
132 * store_flags to ensure the ref_store has all required capabilities.
133 * "caller" is used in any necessary error messages.
134 */
135 static struct files_ref_store *files_downcast(struct ref_store *ref_store,
136 unsigned int required_flags,
137 const char *caller)
138 {
139 struct files_ref_store *refs;
140
141 if (ref_store->be != &refs_be_files)
142 BUG("ref_store is type \"%s\" not \"files\" in %s",
143 ref_store->be->name, caller);
144
145 refs = (struct files_ref_store *)ref_store;
146
147 if ((refs->store_flags & required_flags) != required_flags)
148 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
149 caller, required_flags, refs->store_flags);
150
151 return refs;
152 }
153
154 static void files_reflog_path(struct files_ref_store *refs,
155 struct strbuf *sb,
156 const char *refname)
157 {
158 const char *bare_refname;
159 const char *wtname;
160 int wtname_len;
161 enum ref_worktree_type wt_type = parse_worktree_ref(
162 refname, &wtname, &wtname_len, &bare_refname);
163
164 switch (wt_type) {
165 case REF_WORKTREE_CURRENT:
166 strbuf_addf(sb, "%s/logs/%s", refs->base.gitdir, refname);
167 break;
168 case REF_WORKTREE_SHARED:
169 case REF_WORKTREE_MAIN:
170 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, bare_refname);
171 break;
172 case REF_WORKTREE_OTHER:
173 strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir,
174 wtname_len, wtname, bare_refname);
175 break;
176 default:
177 BUG("unknown ref type %d of ref %s", wt_type, refname);
178 }
179 }
180
181 static void files_ref_path(struct files_ref_store *refs,
182 struct strbuf *sb,
183 const char *refname)
184 {
185 const char *bare_refname;
186 const char *wtname;
187 int wtname_len;
188 enum ref_worktree_type wt_type = parse_worktree_ref(
189 refname, &wtname, &wtname_len, &bare_refname);
190 switch (wt_type) {
191 case REF_WORKTREE_CURRENT:
192 strbuf_addf(sb, "%s/%s", refs->base.gitdir, refname);
193 break;
194 case REF_WORKTREE_OTHER:
195 strbuf_addf(sb, "%s/worktrees/%.*s/%s", refs->gitcommondir,
196 wtname_len, wtname, bare_refname);
197 break;
198 case REF_WORKTREE_SHARED:
199 case REF_WORKTREE_MAIN:
200 strbuf_addf(sb, "%s/%s", refs->gitcommondir, bare_refname);
201 break;
202 default:
203 BUG("unknown ref type %d of ref %s", wt_type, refname);
204 }
205 }
206
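/*
 * For illustration (assuming the usual repository layout), the two
 * helpers above map refnames to paths roughly as follows:
 *
 *   "HEAD"                         -> $GIT_DIR/HEAD              (per-worktree)
 *   "refs/heads/main"              -> $GIT_COMMONDIR/refs/heads/main
 *   "main-worktree/HEAD"           -> $GIT_COMMONDIR/HEAD
 *   "worktrees/wt/refs/bisect/bad" -> $GIT_COMMONDIR/worktrees/wt/refs/bisect/bad
 *
 * with the corresponding reflogs placed under a "logs/" directory at
 * the same level, per the switch statements above.
 */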
207 /*
208 * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being
209 * per-worktree, might not appear in the directory listing for
210 * refs/ in the main repo.
211 */
212 static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname)
213 {
214 const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" };
215 int ip;
216
217 if (strcmp(dirname, "refs/"))
218 return;
219
220 for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) {
221 const char *prefix = prefixes[ip];
222 int prefix_len = strlen(prefix);
223 struct ref_entry *child_entry;
224 int pos;
225
226 pos = search_ref_dir(dir, prefix, prefix_len);
227 if (pos >= 0)
228 continue;
229 child_entry = create_dir_entry(dir->cache, prefix, prefix_len);
230 add_entry_to_dir(dir, child_entry);
231 }
232 }
233
234 /*
235 * Read the loose references from the namespace dirname into dir
236 * (without recursing). dirname must end with '/'. dir must be the
237 * directory entry corresponding to dirname.
238 */
239 static void loose_fill_ref_dir(struct ref_store *ref_store,
240 struct ref_dir *dir, const char *dirname)
241 {
242 struct files_ref_store *refs =
243 files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir");
244 DIR *d;
245 struct dirent *de;
246 int dirnamelen = strlen(dirname);
247 struct strbuf refname;
248 struct strbuf path = STRBUF_INIT;
249 size_t path_baselen;
250
251 files_ref_path(refs, &path, dirname);
252 path_baselen = path.len;
253
254 d = opendir(path.buf);
255 if (!d) {
256 strbuf_release(&path);
257 return;
258 }
259
260 strbuf_init(&refname, dirnamelen + 257);
261 strbuf_add(&refname, dirname, dirnamelen);
262
263 while ((de = readdir(d)) != NULL) {
264 struct object_id oid;
265 struct stat st;
266 int flag;
267
268 if (de->d_name[0] == '.')
269 continue;
270 if (ends_with(de->d_name, ".lock"))
271 continue;
272 strbuf_addstr(&refname, de->d_name);
273 strbuf_addstr(&path, de->d_name);
274 if (stat(path.buf, &st) < 0) {
275 ; /* silently ignore */
276 } else if (S_ISDIR(st.st_mode)) {
277 strbuf_addch(&refname, '/');
278 add_entry_to_dir(dir,
279 create_dir_entry(dir->cache, refname.buf,
280 refname.len));
281 } else {
282 if (!refs_resolve_ref_unsafe(&refs->base,
283 refname.buf,
284 RESOLVE_REF_READING,
285 &oid, &flag)) {
286 oidclr(&oid);
287 flag |= REF_ISBROKEN;
288 } else if (is_null_oid(&oid)) {
289 /*
290 * It is so astronomically unlikely
291 * that null_oid is the OID of an
292 * actual object that we consider its
293 * appearance in a loose reference
294 * file to be repo corruption
295 * (probably due to a software bug).
296 */
297 flag |= REF_ISBROKEN;
298 }
299
300 if (check_refname_format(refname.buf,
301 REFNAME_ALLOW_ONELEVEL)) {
302 if (!refname_is_safe(refname.buf))
303 die("loose refname is dangerous: %s", refname.buf);
304 oidclr(&oid);
305 flag |= REF_BAD_NAME | REF_ISBROKEN;
306 }
307 add_entry_to_dir(dir,
308 create_ref_entry(refname.buf, &oid, flag));
309 }
310 strbuf_setlen(&refname, dirnamelen);
311 strbuf_setlen(&path, path_baselen);
312 }
313 strbuf_release(&refname);
314 strbuf_release(&path);
315 closedir(d);
316
317 add_per_worktree_entries_to_dir(dir, dirname);
318 }
319
320 static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
321 {
322 if (!refs->loose) {
323 /*
324 * Mark the top-level directory complete because we
325 * are about to read the only subdirectory that can
326 * hold references:
327 */
328 refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir);
329
330 /* We're going to fill the top level ourselves: */
331 refs->loose->root->flag &= ~REF_INCOMPLETE;
332
333 /*
334 * Add an incomplete entry for "refs/" (to be filled
335 * lazily):
336 */
337 add_entry_to_dir(get_ref_dir(refs->loose->root),
338 create_dir_entry(refs->loose, "refs/", 5));
339 }
340 return refs->loose;
341 }
342
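/*
 * Note: the cache created above is filled lazily, one directory at a
 * time, by loose_fill_ref_dir(), and is thrown away again by
 * clear_loose_ref_cache() when the store is modified (see e.g.
 * commit_ref_update() below), so that later lookups re-read the
 * filesystem.
 */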
343 static int read_ref_internal(struct ref_store *ref_store, const char *refname,
344 struct object_id *oid, struct strbuf *referent,
345 unsigned int *type, int *failure_errno, int skip_packed_refs)
346 {
347 struct files_ref_store *refs =
348 files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
349 struct strbuf sb_contents = STRBUF_INIT;
350 struct strbuf sb_path = STRBUF_INIT;
351 const char *path;
352 const char *buf;
353 struct stat st;
354 int fd;
355 int ret = -1;
356 int remaining_retries = 3;
357 int myerr = 0;
358
359 *type = 0;
360 strbuf_reset(&sb_path);
361
362 files_ref_path(refs, &sb_path, refname);
363
364 path = sb_path.buf;
365
366 stat_ref:
367 /*
368 * We might have to loop back here to avoid a race
369 * condition: first we lstat() the file, then we try
370 * to read it as a link or as a file. But if somebody
371 * changes the type of the file (file <-> directory
372 * <-> symlink) between the lstat() and reading, then
373 * we don't want to report that as an error but rather
374 * try again starting with the lstat().
375 *
376 * We'll keep a count of the retries, though, just to avoid
377 * any confusing situation sending us into an infinite loop.
378 */
379
380 if (remaining_retries-- <= 0)
381 goto out;
382
383 if (lstat(path, &st) < 0) {
384 int ignore_errno;
385 myerr = errno;
386 if (myerr != ENOENT || skip_packed_refs)
387 goto out;
388 if (refs_read_raw_ref(refs->packed_ref_store, refname, oid,
389 referent, type, &ignore_errno)) {
390 myerr = ENOENT;
391 goto out;
392 }
393 ret = 0;
394 goto out;
395 }
396
397 /* Follow "normalized" symlinks (i.e., those whose target starts with "refs/") by hand */
398 if (S_ISLNK(st.st_mode)) {
399 strbuf_reset(&sb_contents);
400 if (strbuf_readlink(&sb_contents, path, st.st_size) < 0) {
401 myerr = errno;
402 if (myerr == ENOENT || myerr == EINVAL)
403 /* inconsistent with lstat; retry */
404 goto stat_ref;
405 else
406 goto out;
407 }
408 if (starts_with(sb_contents.buf, "refs/") &&
409 !check_refname_format(sb_contents.buf, 0)) {
410 strbuf_swap(&sb_contents, referent);
411 *type |= REF_ISSYMREF;
412 ret = 0;
413 goto out;
414 }
415 /*
416 * It doesn't look like a refname; fall through to just
417 * treating it like a non-symlink, and reading whatever it
418 * points to.
419 */
420 }
421
422 /* Is it a directory? */
423 if (S_ISDIR(st.st_mode)) {
424 int ignore_errno;
425 /*
426 * Even though there is a directory where the loose
427 * ref is supposed to be, there could still be a
428 * packed ref:
429 */
430 if (skip_packed_refs ||
431 refs_read_raw_ref(refs->packed_ref_store, refname, oid,
432 referent, type, &ignore_errno)) {
433 myerr = EISDIR;
434 goto out;
435 }
436 ret = 0;
437 goto out;
438 }
439
440 /*
441 * Anything else, just open it and try to use it as
442 * a ref
443 */
444 fd = open(path, O_RDONLY);
445 if (fd < 0) {
446 myerr = errno;
447 if (myerr == ENOENT && !S_ISLNK(st.st_mode))
448 /* inconsistent with lstat; retry */
449 goto stat_ref;
450 else
451 goto out;
452 }
453 strbuf_reset(&sb_contents);
454 if (strbuf_read(&sb_contents, fd, 256) < 0) {
455 myerr = errno;
456 close(fd);
457 goto out;
458 }
459 close(fd);
460 strbuf_rtrim(&sb_contents);
461 buf = sb_contents.buf;
462
463 ret = parse_loose_ref_contents(buf, oid, referent, type, &myerr);
464
465 out:
466 if (ret && !myerr)
467 BUG("returning non-zero %d, should have set myerr!", ret);
468 *failure_errno = myerr;
469
470 strbuf_release(&sb_path);
471 strbuf_release(&sb_contents);
472 errno = 0;
473 return ret;
474 }
475
476 static int files_read_raw_ref(struct ref_store *ref_store, const char *refname,
477 struct object_id *oid, struct strbuf *referent,
478 unsigned int *type, int *failure_errno)
479 {
480 return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0);
481 }
482
483 static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
484 struct strbuf *referent)
485 {
486 struct object_id oid;
487 int failure_errno, ret;
488 unsigned int type;
489
490 ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1);
491 if (ret)
492 return ret;
493
494 return !(type & REF_ISSYMREF);
495 }
496
497 int parse_loose_ref_contents(const char *buf, struct object_id *oid,
498 struct strbuf *referent, unsigned int *type,
499 int *failure_errno)
500 {
501 const char *p;
502 if (skip_prefix(buf, "ref:", &buf)) {
503 while (isspace(*buf))
504 buf++;
505
506 strbuf_reset(referent);
507 strbuf_addstr(referent, buf);
508 *type |= REF_ISSYMREF;
509 return 0;
510 }
511
512 /*
513 * FETCH_HEAD has additional data after the sha.
514 */
515 if (parse_oid_hex(buf, oid, &p) ||
516 (*p != '\0' && !isspace(*p))) {
517 *type |= REF_ISBROKEN;
518 *failure_errno = EINVAL;
519 return -1;
520 }
521 return 0;
522 }
523
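/*
 * For illustration, the two shapes of loose-ref file content accepted
 * above are
 *
 *   "ref: refs/heads/main\n"                        (symbolic ref)
 *   "3f786850e387550fdab836ed7e6dc881de23001b\n"    (direct ref, SHA-1)
 *
 * FETCH_HEAD carries extra free-form text after the object ID, which
 * is why trailing data is tolerated as long as it begins with
 * whitespace.
 */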
524 static void unlock_ref(struct ref_lock *lock)
525 {
526 rollback_lock_file(&lock->lk);
527 free(lock->ref_name);
528 free(lock);
529 }
530
531 /*
532 * Lock refname, without following symrefs, and set *lock_p to point
533 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
534 * and type similarly to read_raw_ref().
535 *
536 * The caller must verify that refname is a "safe" reference name (in
537 * the sense of refname_is_safe()) before calling this function.
538 *
539 * If the reference doesn't already exist, verify that refname doesn't
540 * have a D/F conflict with any existing references. extras and skip
541 * are passed to refs_verify_refname_available() for this check.
542 *
543 * If mustexist is not set and the reference is not found or is
544 * broken, lock the reference anyway but clear old_oid.
545 *
546 * Return 0 on success. On failure, write an error message to err and
547 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
548 *
549 * Implementation note: This function is basically
550 *
551 * lock reference
552 * read_raw_ref()
553 *
554 * but it includes a lot more code to
555 * - Deal with possible races with other processes
556 * - Avoid calling refs_verify_refname_available() when it can be
557 * avoided, namely if we were successfully able to read the ref
558 * - Generate informative error messages in the case of failure
559 */
560 static int lock_raw_ref(struct files_ref_store *refs,
561 const char *refname, int mustexist,
562 const struct string_list *extras,
563 struct ref_lock **lock_p,
564 struct strbuf *referent,
565 unsigned int *type,
566 struct strbuf *err)
567 {
568 struct ref_lock *lock;
569 struct strbuf ref_file = STRBUF_INIT;
570 int attempts_remaining = 3;
571 int ret = TRANSACTION_GENERIC_ERROR;
572 int failure_errno;
573
574 assert(err);
575 files_assert_main_repository(refs, "lock_raw_ref");
576
577 *type = 0;
578
579 /* First lock the file so it can't change out from under us. */
580
581 *lock_p = CALLOC_ARRAY(lock, 1);
582
583 lock->ref_name = xstrdup(refname);
584 files_ref_path(refs, &ref_file, refname);
585
586 retry:
587 switch (safe_create_leading_directories(ref_file.buf)) {
588 case SCLD_OK:
589 break; /* success */
590 case SCLD_EXISTS:
591 /*
592 * Suppose refname is "refs/foo/bar". We just failed
593 * to create the containing directory, "refs/foo",
594 * because there was a non-directory in the way. This
595 * indicates a D/F conflict, probably because of
596 * another reference such as "refs/foo". There is no
597 * reason to expect this error to be transitory.
598 */
599 if (refs_verify_refname_available(&refs->base, refname,
600 extras, NULL, err)) {
601 if (mustexist) {
602 /*
603 * To the user the relevant error is
604 * that the "mustexist" reference is
605 * missing:
606 */
607 strbuf_reset(err);
608 strbuf_addf(err, "unable to resolve reference '%s'",
609 refname);
610 } else {
611 /*
612 * The error message set by
613 * refs_verify_refname_available() is
614 * OK.
615 */
616 ret = TRANSACTION_NAME_CONFLICT;
617 }
618 } else {
619 /*
620 * The file that is in the way isn't a loose
621 * reference. Report it as a low-level
622 * failure.
623 */
624 strbuf_addf(err, "unable to create lock file %s.lock; "
625 "non-directory in the way",
626 ref_file.buf);
627 }
628 goto error_return;
629 case SCLD_VANISHED:
630 /* Maybe another process was tidying up. Try again. */
631 if (--attempts_remaining > 0)
632 goto retry;
633 /* fall through */
634 default:
635 strbuf_addf(err, "unable to create directory for %s",
636 ref_file.buf);
637 goto error_return;
638 }
639
640 if (hold_lock_file_for_update_timeout(
641 &lock->lk, ref_file.buf, LOCK_NO_DEREF,
642 get_files_ref_lock_timeout_ms()) < 0) {
643 int myerr = errno;
644 errno = 0;
645 if (myerr == ENOENT && --attempts_remaining > 0) {
646 /*
647 * Maybe somebody just deleted one of the
648 * directories leading to ref_file. Try
649 * again:
650 */
651 goto retry;
652 } else {
653 unable_to_lock_message(ref_file.buf, myerr, err);
654 goto error_return;
655 }
656 }
657
658 /*
659 * Now we hold the lock and can read the reference without
660 * fear that its value will change.
661 */
662
663 if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent,
664 type, &failure_errno)) {
665 if (failure_errno == ENOENT) {
666 if (mustexist) {
667 /* Garden variety missing reference. */
668 strbuf_addf(err, "unable to resolve reference '%s'",
669 refname);
670 goto error_return;
671 } else {
672 /*
673 * Reference is missing, but that's OK. We
674 * know that there is not a conflict with
675 * another loose reference because
676 * (supposing that we are trying to lock
677 * reference "refs/foo/bar"):
678 *
679 * - We were successfully able to create
680 * the lockfile refs/foo/bar.lock, so we
681 * know there cannot be a loose reference
682 * named "refs/foo".
683 *
684 * - We got ENOENT and not EISDIR, so we
685 * know that there cannot be a loose
686 * reference named "refs/foo/bar/baz".
687 */
688 }
689 } else if (failure_errno == EISDIR) {
690 /*
691 * There is a directory in the way. It might have
692 * contained references that have been deleted. If
693 * we don't require that the reference already
694 * exists, try to remove the directory so that it
695 * doesn't cause trouble when we want to rename the
696 * lockfile into place later.
697 */
698 if (mustexist) {
699 /* Garden variety missing reference. */
700 strbuf_addf(err, "unable to resolve reference '%s'",
701 refname);
702 goto error_return;
703 } else if (remove_dir_recursively(&ref_file,
704 REMOVE_DIR_EMPTY_ONLY)) {
705 if (refs_verify_refname_available(
706 &refs->base, refname,
707 extras, NULL, err)) {
708 /*
709 * The error message set by
710 * verify_refname_available() is OK.
711 */
712 ret = TRANSACTION_NAME_CONFLICT;
713 goto error_return;
714 } else {
715 /*
716 * We can't delete the directory,
717 * but we also don't know of any
718 * references that it should
719 * contain.
720 */
721 strbuf_addf(err, "there is a non-empty directory '%s' "
722 "blocking reference '%s'",
723 ref_file.buf, refname);
724 goto error_return;
725 }
726 }
727 } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) {
728 strbuf_addf(err, "unable to resolve reference '%s': "
729 "reference broken", refname);
730 goto error_return;
731 } else {
732 strbuf_addf(err, "unable to resolve reference '%s': %s",
733 refname, strerror(failure_errno));
734 goto error_return;
735 }
736
737 /*
738 * If the ref did not exist and we are creating it,
739 * make sure there is no existing packed ref that
740 * conflicts with refname:
741 */
742 if (refs_verify_refname_available(
743 refs->packed_ref_store, refname,
744 extras, NULL, err))
745 goto error_return;
746 }
747
748 ret = 0;
749 goto out;
750
751 error_return:
752 unlock_ref(lock);
753 *lock_p = NULL;
754
755 out:
756 strbuf_release(&ref_file);
757 return ret;
758 }
759
760 struct files_ref_iterator {
761 struct ref_iterator base;
762
763 struct ref_iterator *iter0;
764 struct repository *repo;
765 unsigned int flags;
766 };
767
768 static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
769 {
770 struct files_ref_iterator *iter =
771 (struct files_ref_iterator *)ref_iterator;
772 int ok;
773
774 while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
775 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
776 parse_worktree_ref(iter->iter0->refname, NULL, NULL,
777 NULL) != REF_WORKTREE_CURRENT)
778 continue;
779
780 if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) &&
781 (iter->iter0->flags & REF_ISSYMREF) &&
782 (iter->iter0->flags & REF_ISBROKEN))
783 continue;
784
785 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
786 !ref_resolves_to_object(iter->iter0->refname,
787 iter->repo,
788 iter->iter0->oid,
789 iter->iter0->flags))
790 continue;
791
792 iter->base.refname = iter->iter0->refname;
793 iter->base.oid = iter->iter0->oid;
794 iter->base.flags = iter->iter0->flags;
795 return ITER_OK;
796 }
797
798 iter->iter0 = NULL;
799 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
800 ok = ITER_ERROR;
801
802 return ok;
803 }
804
805 static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
806 struct object_id *peeled)
807 {
808 struct files_ref_iterator *iter =
809 (struct files_ref_iterator *)ref_iterator;
810
811 return ref_iterator_peel(iter->iter0, peeled);
812 }
813
814 static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
815 {
816 struct files_ref_iterator *iter =
817 (struct files_ref_iterator *)ref_iterator;
818 int ok = ITER_DONE;
819
820 if (iter->iter0)
821 ok = ref_iterator_abort(iter->iter0);
822
823 base_ref_iterator_free(ref_iterator);
824 return ok;
825 }
826
827 static struct ref_iterator_vtable files_ref_iterator_vtable = {
828 .advance = files_ref_iterator_advance,
829 .peel = files_ref_iterator_peel,
830 .abort = files_ref_iterator_abort,
831 };
832
833 static struct ref_iterator *files_ref_iterator_begin(
834 struct ref_store *ref_store,
835 const char *prefix, const char **exclude_patterns,
836 unsigned int flags)
837 {
838 struct files_ref_store *refs;
839 struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
840 struct files_ref_iterator *iter;
841 struct ref_iterator *ref_iterator;
842 unsigned int required_flags = REF_STORE_READ;
843
844 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
845 required_flags |= REF_STORE_ODB;
846
847 refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
848
849 /*
850 * We must make sure that all loose refs are read before
851 * accessing the packed-refs file; this avoids a race
852 * condition if loose refs are migrated to the packed-refs
853 * file by a simultaneous process, but our in-memory view is
854 * from before the migration. We ensure this as follows:
855 * First, we start the loose refs iteration with its
856 * `prime_ref` argument set to true. This causes the loose
857 * references in the subtree to be pre-read into the cache.
858 * (If they've already been read, that's OK; we only need to
859 * guarantee that they're read before the packed refs, not
860 * *how much* before.) After that, we call
861 * packed_ref_iterator_begin(), which internally checks
862 * whether the packed-ref cache is up to date with what is on
863 * disk, and re-reads it if not.
864 */
865
866 loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
867 prefix, ref_store->repo, 1);
868
869 /*
870 * The packed-refs file might contain broken references, for
871 * example an old version of a reference that points at an
872 * object that has since been garbage-collected. This is OK as
873 * long as there is a corresponding loose reference that
874 * overrides it, and we don't want to emit an error message in
875 * this case. So ask the packed_ref_store for all of its
876 * references, and (if needed) do our own check for broken
877 * ones in files_ref_iterator_advance(), after we have merged
878 * the packed and loose references.
879 */
880 packed_iter = refs_ref_iterator_begin(
881 refs->packed_ref_store, prefix, exclude_patterns, 0,
882 DO_FOR_EACH_INCLUDE_BROKEN);
883
884 overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
885
886 CALLOC_ARRAY(iter, 1);
887 ref_iterator = &iter->base;
888 base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
889 overlay_iter->ordered);
890 iter->iter0 = overlay_iter;
891 iter->repo = ref_store->repo;
892 iter->flags = flags;
893
894 return ref_iterator;
895 }
896
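/*
 * Note: overlay_ref_iterator_begin() merges the two streams by
 * refname, with a loose entry taking precedence over a packed entry
 * of the same name. That is why it is safe to ask the packed store
 * for broken entries above and to filter them only afterwards in
 * files_ref_iterator_advance().
 */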
897 /*
898 * Callback function for raceproof_create_file(). This function is
899 * expected to do something that makes dirname(path) permanent despite
900 * the fact that other processes might be cleaning up empty
901 * directories at the same time. Usually it will create a file named
902 * path, but alternatively it could create another file in that
903 * directory, or even chdir() into that directory. The function should
904 * return 0 if the action was completed successfully. On error, it
905 * should return a nonzero result and set errno.
906 * raceproof_create_file() treats two errno values specially:
907 *
908 * - ENOENT -- dirname(path) does not exist. In this case,
909 * raceproof_create_file() tries creating dirname(path)
910 * (and any parent directories, if necessary) and calls
911 * the function again.
912 *
913 * - EISDIR -- the file already exists and is a directory. In this
914 * case, raceproof_create_file() removes the directory if
915 * it is empty (and recursively any empty directories that
916 * it contains) and calls the function again.
917 *
918 * Any other errno causes raceproof_create_file() to fail with the
919 * callback's return value and errno.
920 *
921 * Obviously, this function should be OK with being called again if it
922 * fails with ENOENT or EISDIR. In other scenarios it will not be
923 * called again.
924 */
925 typedef int create_file_fn(const char *path, void *cb);
926
927 /*
928 * Create a file in dirname(path) by calling fn, creating leading
929 * directories if necessary. Retry a few times in case we are racing
930 * with another process that is trying to clean up the directory that
931 * contains path. See the documentation for create_file_fn for more
932 * details.
933 *
934 * Return the value and set the errno that resulted from the most
935 * recent call of fn. fn is always called at least once, and will be
936 * called more than once if it fails with ENOENT or EISDIR.
937 */
938 static int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
939 {
940 /*
941 * The number of times we will try to remove empty directories
942 * in the way of path. This is only 1 because if another
943 * process is racily creating directories that conflict with
944 * us, we don't want to fight against them.
945 */
946 int remove_directories_remaining = 1;
947
948 /*
949 * The number of times that we will try to create the
950 * directories containing path. We are willing to attempt this
951 * more than once, because another process could be trying to
952 * clean up empty directories at the same time as we are
953 * trying to create them.
954 */
955 int create_directories_remaining = 3;
956
957 /* A scratch copy of path, filled lazily if we need it: */
958 struct strbuf path_copy = STRBUF_INIT;
959
960 int ret, save_errno;
961
962 /* Sanity check: */
963 assert(*path);
964
965 retry_fn:
966 ret = fn(path, cb);
967 save_errno = errno;
968 if (!ret)
969 goto out;
970
971 if (errno == EISDIR && remove_directories_remaining-- > 0) {
972 /*
973 * A directory is in the way. Maybe it is empty; try
974 * to remove it:
975 */
976 if (!path_copy.len)
977 strbuf_addstr(&path_copy, path);
978
979 if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
980 goto retry_fn;
981 } else if (errno == ENOENT && create_directories_remaining-- > 0) {
982 /*
983 * Maybe the containing directory didn't exist, or
984 * maybe it was just deleted by a process that is
985 * racing with us to clean up empty directories. Try
986 * to create it:
987 */
988 enum scld_error scld_result;
989
990 if (!path_copy.len)
991 strbuf_addstr(&path_copy, path);
992
993 do {
994 scld_result = safe_create_leading_directories(path_copy.buf);
995 if (scld_result == SCLD_OK)
996 goto retry_fn;
997 } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
998 }
999
1000 out:
1001 strbuf_release(&path_copy);
1002 errno = save_errno;
1003 return ret;
1004 }
1005
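/*
 * Note: typical callbacks passed to raceproof_create_file() are tiny,
 * e.g. create_reflock() and open_or_create_logfile() below; they only
 * attempt the creation once and report errno, while all retrying and
 * directory creation/removal stays in raceproof_create_file().
 */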
1006 static int remove_empty_directories(struct strbuf *path)
1007 {
1008 /*
1009 * we want to create a file but there is a directory there;
1010 * if that is an empty directory (or a directory that contains
1011 * only empty directories), remove them.
1012 */
1013 return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);
1014 }
1015
1016 static int create_reflock(const char *path, void *cb)
1017 {
1018 struct lock_file *lk = cb;
1019
1020 return hold_lock_file_for_update_timeout(
1021 lk, path, LOCK_NO_DEREF,
1022 get_files_ref_lock_timeout_ms()) < 0 ? -1 : 0;
1023 }
1024
1025 /*
1026 * Locks a ref returning the lock on success and NULL on failure.
1027 */
1028 static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
1029 const char *refname,
1030 struct strbuf *err)
1031 {
1032 struct strbuf ref_file = STRBUF_INIT;
1033 struct ref_lock *lock;
1034
1035 files_assert_main_repository(refs, "lock_ref_oid_basic");
1036 assert(err);
1037
1038 CALLOC_ARRAY(lock, 1);
1039
1040 files_ref_path(refs, &ref_file, refname);
1041
1042 /*
1043 * If the ref did not exist and we are creating it, make sure
1044 * there is no existing packed ref whose name begins with our
1045 * refname, nor a packed ref whose name is a proper prefix of
1046 * our refname.
1047 */
1048 if (is_null_oid(&lock->old_oid) &&
1049 refs_verify_refname_available(refs->packed_ref_store, refname,
1050 NULL, NULL, err))
1051 goto error_return;
1052
1053 lock->ref_name = xstrdup(refname);
1054
1055 if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
1056 unable_to_lock_message(ref_file.buf, errno, err);
1057 goto error_return;
1058 }
1059
1060 if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0,
1061 &lock->old_oid, NULL))
1062 oidclr(&lock->old_oid);
1063 goto out;
1064
1065 error_return:
1066 unlock_ref(lock);
1067 lock = NULL;
1068
1069 out:
1070 strbuf_release(&ref_file);
1071 return lock;
1072 }
1073
1074 struct ref_to_prune {
1075 struct ref_to_prune *next;
1076 struct object_id oid;
1077 char name[FLEX_ARRAY];
1078 };
1079
1080 enum {
1081 REMOVE_EMPTY_PARENTS_REF = 0x01,
1082 REMOVE_EMPTY_PARENTS_REFLOG = 0x02
1083 };
1084
1085 /*
1086 * Remove empty parent directories associated with the specified
1087 * reference and/or its reflog, but spare [logs/]refs/ and immediate
1088 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
1089 * REMOVE_EMPTY_PARENTS_REFLOG.
1090 */
1091 static void try_remove_empty_parents(struct files_ref_store *refs,
1092 const char *refname,
1093 unsigned int flags)
1094 {
1095 struct strbuf buf = STRBUF_INIT;
1096 struct strbuf sb = STRBUF_INIT;
1097 char *p, *q;
1098 int i;
1099
1100 strbuf_addstr(&buf, refname);
1101 p = buf.buf;
1102 for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
1103 while (*p && *p != '/')
1104 p++;
1105 /* tolerate duplicate slashes; see check_refname_format() */
1106 while (*p == '/')
1107 p++;
1108 }
1109 q = buf.buf + buf.len;
1110 while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
1111 while (q > p && *q != '/')
1112 q--;
1113 while (q > p && *(q-1) == '/')
1114 q--;
1115 if (q == p)
1116 break;
1117 strbuf_setlen(&buf, q - buf.buf);
1118
1119 strbuf_reset(&sb);
1120 files_ref_path(refs, &sb, buf.buf);
1121 if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
1122 flags &= ~REMOVE_EMPTY_PARENTS_REF;
1123
1124 strbuf_reset(&sb);
1125 files_reflog_path(refs, &sb, buf.buf);
1126 if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
1127 flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
1128 }
1129 strbuf_release(&buf);
1130 strbuf_release(&sb);
1131 }
1132
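/*
 * For illustration: after deleting "refs/heads/topic/a", a call to
 * try_remove_empty_parents(refs, "refs/heads/topic/a",
 * REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG) may rmdir
 * the now-empty "refs/heads/topic" (and its "logs/" counterpart), but
 * stops before "refs/heads" because the first two path components are
 * skipped above.
 */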
1133 /* make sure nobody touched the ref, and unlink */
1134 static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
1135 {
1136 struct ref_transaction *transaction;
1137 struct strbuf err = STRBUF_INIT;
1138 int ret = -1;
1139
1140 if (check_refname_format(r->name, 0))
1141 return;
1142
1143 transaction = ref_store_transaction_begin(&refs->base, &err);
1144 if (!transaction)
1145 goto cleanup;
1146 ref_transaction_add_update(
1147 transaction, r->name,
1148 REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
1149 null_oid(), &r->oid, NULL);
1150 if (ref_transaction_commit(transaction, &err))
1151 goto cleanup;
1152
1153 ret = 0;
1154
1155 cleanup:
1156 if (ret)
1157 error("%s", err.buf);
1158 strbuf_release(&err);
1159 ref_transaction_free(transaction);
1160 return;
1161 }
1162
1163 /*
1164 * Prune the loose versions of the references in the linked list
1165 * `*refs_to_prune`, freeing the entries in the list as we go.
1166 */
1167 static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
1168 {
1169 while (*refs_to_prune) {
1170 struct ref_to_prune *r = *refs_to_prune;
1171 *refs_to_prune = r->next;
1172 prune_ref(refs, r);
1173 free(r);
1174 }
1175 }
1176
1177 /*
1178 * Return true if the specified reference should be packed.
1179 */
1180 static int should_pack_ref(const char *refname,
1181 const struct object_id *oid, unsigned int ref_flags,
1182 struct pack_refs_opts *opts)
1183 {
1184 struct string_list_item *item;
1185
1186 /* Do not pack per-worktree refs: */
1187 if (parse_worktree_ref(refname, NULL, NULL, NULL) !=
1188 REF_WORKTREE_SHARED)
1189 return 0;
1190
1191 /* Do not pack symbolic refs: */
1192 if (ref_flags & REF_ISSYMREF)
1193 return 0;
1194
1195 /* Do not pack broken refs: */
1196 if (!ref_resolves_to_object(refname, the_repository, oid, ref_flags))
1197 return 0;
1198
1199 if (ref_excluded(opts->exclusions, refname))
1200 return 0;
1201
1202 for_each_string_list_item(item, opts->includes)
1203 if (!wildmatch(item->string, refname, 0))
1204 return 1;
1205
1206 return 0;
1207 }
1208
1209 static int files_pack_refs(struct ref_store *ref_store,
1210 struct pack_refs_opts *opts)
1211 {
1212 struct files_ref_store *refs =
1213 files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
1214 "pack_refs");
1215 struct ref_iterator *iter;
1216 int ok;
1217 struct ref_to_prune *refs_to_prune = NULL;
1218 struct strbuf err = STRBUF_INIT;
1219 struct ref_transaction *transaction;
1220
1221 transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
1222 if (!transaction)
1223 return -1;
1224
1225 packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
1226
1227 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL,
1228 the_repository, 0);
1229 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
1230 /*
1231 * If the loose reference can be packed, add an entry
1232 * in the packed ref cache. If the reference should be
1233 * pruned, also add it to refs_to_prune.
1234 */
1235 if (!should_pack_ref(iter->refname, iter->oid, iter->flags, opts))
1236 continue;
1237
1238 /*
1239 * Add a reference creation for this reference to the
1240 * packed-refs transaction:
1241 */
1242 if (ref_transaction_update(transaction, iter->refname,
1243 iter->oid, NULL,
1244 REF_NO_DEREF, NULL, &err))
1245 die("failure preparing to create packed reference %s: %s",
1246 iter->refname, err.buf);
1247
1248 /* Schedule the loose reference for pruning if requested. */
1249 if ((opts->flags & PACK_REFS_PRUNE)) {
1250 struct ref_to_prune *n;
1251 FLEX_ALLOC_STR(n, name, iter->refname);
1252 oidcpy(&n->oid, iter->oid);
1253 n->next = refs_to_prune;
1254 refs_to_prune = n;
1255 }
1256 }
1257 if (ok != ITER_DONE)
1258 die("error while iterating over references");
1259
1260 if (ref_transaction_commit(transaction, &err))
1261 die("unable to write new packed-refs: %s", err.buf);
1262
1263 ref_transaction_free(transaction);
1264
1265 packed_refs_unlock(refs->packed_ref_store);
1266
1267 prune_refs(refs, &refs_to_prune);
1268 strbuf_release(&err);
1269 return 0;
1270 }
1271
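/*
 * Note: the function above packs refs in distinct phases: take the
 * packed-refs lock, queue every pack-eligible loose ref in a single
 * transaction on the packed store, commit the new packed-refs file,
 * release the lock, and only then prune the loose copies via
 * prune_refs(). A crash in between leaves every ref readable from at
 * least one of the two stores.
 */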
1272 static int files_delete_refs(struct ref_store *ref_store, const char *msg,
1273 struct string_list *refnames, unsigned int flags)
1274 {
1275 struct files_ref_store *refs =
1276 files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1277 struct strbuf err = STRBUF_INIT;
1278 int i, result = 0;
1279
1280 if (!refnames->nr)
1281 return 0;
1282
1283 if (packed_refs_lock(refs->packed_ref_store, 0, &err))
1284 goto error;
1285
1286 if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) {
1287 packed_refs_unlock(refs->packed_ref_store);
1288 goto error;
1289 }
1290
1291 packed_refs_unlock(refs->packed_ref_store);
1292
1293 for (i = 0; i < refnames->nr; i++) {
1294 const char *refname = refnames->items[i].string;
1295
1296 if (refs_delete_ref(&refs->base, msg, refname, NULL, flags))
1297 result |= error(_("could not remove reference %s"), refname);
1298 }
1299
1300 strbuf_release(&err);
1301 return result;
1302
1303 error:
1304 /*
1305 * If we failed to rewrite the packed-refs file, then it is
1306 * unsafe to try to remove loose refs, because doing so might
1307 * expose an obsolete packed value for a reference that might
1308 * even point at an object that has been garbage collected.
1309 */
1310 if (refnames->nr == 1)
1311 error(_("could not delete reference %s: %s"),
1312 refnames->items[0].string, err.buf);
1313 else
1314 error(_("could not delete references: %s"), err.buf);
1315
1316 strbuf_release(&err);
1317 return -1;
1318 }
1319
1320 /*
1321 * People using contrib's git-new-workdir have .git/logs/refs ->
1322 * /some/other/path/.git/logs/refs, and that may live on another device.
1323 *
1324 * IOW, to avoid cross device rename errors, the temporary renamed log must
1325 * live under logs/refs.
1326 */
1327 #define TMP_RENAMED_LOG "refs/.tmp-renamed-log"
1328
1329 struct rename_cb {
1330 const char *tmp_renamed_log;
1331 int true_errno;
1332 };
1333
1334 static int rename_tmp_log_callback(const char *path, void *cb_data)
1335 {
1336 struct rename_cb *cb = cb_data;
1337
1338 if (rename(cb->tmp_renamed_log, path)) {
1339 /*
1340 * rename(a, b) when b is an existing directory ought
1341 * to result in EISDIR, but Solaris 5.8 gives ENOTDIR.
1342 * Sheesh. Record the true errno for error reporting,
1343 * but report EISDIR to raceproof_create_file() so
1344 * that it knows to retry.
1345 */
1346 cb->true_errno = errno;
1347 if (errno == ENOTDIR)
1348 errno = EISDIR;
1349 return -1;
1350 } else {
1351 return 0;
1352 }
1353 }
1354
1355 static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
1356 {
1357 struct strbuf path = STRBUF_INIT;
1358 struct strbuf tmp = STRBUF_INIT;
1359 struct rename_cb cb;
1360 int ret;
1361
1362 files_reflog_path(refs, &path, newrefname);
1363 files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
1364 cb.tmp_renamed_log = tmp.buf;
1365 ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
1366 if (ret) {
1367 if (errno == EISDIR)
1368 error("directory not empty: %s", path.buf);
1369 else
1370 error("unable to move logfile %s to %s: %s",
1371 tmp.buf, path.buf,
1372 strerror(cb.true_errno));
1373 }
1374
1375 strbuf_release(&path);
1376 strbuf_release(&tmp);
1377 return ret;
1378 }
1379
1380 static int write_ref_to_lockfile(struct ref_lock *lock,
1381 const struct object_id *oid,
1382 int skip_oid_verification, struct strbuf *err);
1383 static int commit_ref_update(struct files_ref_store *refs,
1384 struct ref_lock *lock,
1385 const struct object_id *oid, const char *logmsg,
1386 struct strbuf *err);
1387
1388 /*
1389 * Emit a better error message than lockfile.c's
1390 * unable_to_lock_message() would in case there is a D/F conflict with
1391 * another existing reference. If there would be a conflict, emit an error
1392 * message and return false; otherwise, return true.
1393 *
1394 * Note that this function is not safe against all races with other
1395 * processes, and that's not its job. We'll emit a more verbose error on D/F
1396 * conflicts if we get past it into lock_ref_oid_basic().
1397 */
1398 static int refs_rename_ref_available(struct ref_store *refs,
1399 const char *old_refname,
1400 const char *new_refname)
1401 {
1402 struct string_list skip = STRING_LIST_INIT_NODUP;
1403 struct strbuf err = STRBUF_INIT;
1404 int ok;
1405
1406 string_list_insert(&skip, old_refname);
1407 ok = !refs_verify_refname_available(refs, new_refname,
1408 NULL, &skip, &err);
1409 if (!ok)
1410 error("%s", err.buf);
1411
1412 string_list_clear(&skip, 0);
1413 strbuf_release(&err);
1414 return ok;
1415 }
1416
1417 static int files_copy_or_rename_ref(struct ref_store *ref_store,
1418 const char *oldrefname, const char *newrefname,
1419 const char *logmsg, int copy)
1420 {
1421 struct files_ref_store *refs =
1422 files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1423 struct object_id orig_oid;
1424 int flag = 0, logmoved = 0;
1425 struct ref_lock *lock;
1426 struct stat loginfo;
1427 struct strbuf sb_oldref = STRBUF_INIT;
1428 struct strbuf sb_newref = STRBUF_INIT;
1429 struct strbuf tmp_renamed_log = STRBUF_INIT;
1430 int log, ret;
1431 struct strbuf err = STRBUF_INIT;
1432
1433 files_reflog_path(refs, &sb_oldref, oldrefname);
1434 files_reflog_path(refs, &sb_newref, newrefname);
1435 files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);
1436
1437 log = !lstat(sb_oldref.buf, &loginfo);
1438 if (log && S_ISLNK(loginfo.st_mode)) {
1439 ret = error("reflog for %s is a symlink", oldrefname);
1440 goto out;
1441 }
1442
1443 if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
1444 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1445 &orig_oid, &flag)) {
1446 ret = error("refname %s not found", oldrefname);
1447 goto out;
1448 }
1449
1450 if (flag & REF_ISSYMREF) {
1451 if (copy)
1452 ret = error("refname %s is a symbolic ref, copying it is not supported",
1453 oldrefname);
1454 else
1455 ret = error("refname %s is a symbolic ref, renaming it is not supported",
1456 oldrefname);
1457 goto out;
1458 }
1459 if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
1460 ret = 1;
1461 goto out;
1462 }
1463
1464 if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
1465 ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1466 oldrefname, strerror(errno));
1467 goto out;
1468 }
1469
1470 if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) {
1471 ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1472 oldrefname, strerror(errno));
1473 goto out;
1474 }
1475
1476 if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
1477 &orig_oid, REF_NO_DEREF)) {
1478 error("unable to delete old %s", oldrefname);
1479 goto rollback;
1480 }
1481
1482 /*
1483 * Since we are doing a shallow lookup, oid is not the
1484 * correct value to pass to delete_ref as old_oid. But that
1485 * doesn't matter, because an old_oid check wouldn't add to
1486 * the safety anyway; we want to delete the reference whatever
1487 * its current value.
1488 */
1489 if (!copy && refs_resolve_ref_unsafe(&refs->base, newrefname,
1490 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1491 NULL, NULL) &&
1492 refs_delete_ref(&refs->base, NULL, newrefname,
1493 NULL, REF_NO_DEREF)) {
1494 if (errno == EISDIR) {
1495 struct strbuf path = STRBUF_INIT;
1496 int result;
1497
1498 files_ref_path(refs, &path, newrefname);
1499 result = remove_empty_directories(&path);
1500 strbuf_release(&path);
1501
1502 if (result) {
1503 error("Directory not empty: %s", newrefname);
1504 goto rollback;
1505 }
1506 } else {
1507 error("unable to delete existing %s", newrefname);
1508 goto rollback;
1509 }
1510 }
1511
1512 if (log && rename_tmp_log(refs, newrefname))
1513 goto rollback;
1514
1515 logmoved = log;
1516
1517 lock = lock_ref_oid_basic(refs, newrefname, &err);
1518 if (!lock) {
1519 if (copy)
1520 error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1521 else
1522 error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1523 strbuf_release(&err);
1524 goto rollback;
1525 }
1526 oidcpy(&lock->old_oid, &orig_oid);
1527
1528 if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
1529 commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
1530 error("unable to write current sha1 into %s: %s", newrefname, err.buf);
1531 strbuf_release(&err);
1532 goto rollback;
1533 }
1534
1535 ret = 0;
1536 goto out;
1537
1538 rollback:
1539 lock = lock_ref_oid_basic(refs, oldrefname, &err);
1540 if (!lock) {
1541 error("unable to lock %s for rollback: %s", oldrefname, err.buf);
1542 strbuf_release(&err);
1543 goto rollbacklog;
1544 }
1545
1546 flag = log_all_ref_updates;
1547 log_all_ref_updates = LOG_REFS_NONE;
1548 if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
1549 commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
1550 error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
1551 strbuf_release(&err);
1552 }
1553 log_all_ref_updates = flag;
1554
1555 rollbacklog:
1556 if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
1557 error("unable to restore logfile %s from %s: %s",
1558 oldrefname, newrefname, strerror(errno));
1559 if (!logmoved && log &&
1560 rename(tmp_renamed_log.buf, sb_oldref.buf))
1561 error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
1562 oldrefname, strerror(errno));
1563 ret = 1;
1564 out:
1565 strbuf_release(&sb_newref);
1566 strbuf_release(&sb_oldref);
1567 strbuf_release(&tmp_renamed_log);
1568
1569 return ret;
1570 }
1571
1572 static int files_rename_ref(struct ref_store *ref_store,
1573 const char *oldrefname, const char *newrefname,
1574 const char *logmsg)
1575 {
1576 return files_copy_or_rename_ref(ref_store, oldrefname,
1577 newrefname, logmsg, 0);
1578 }
1579
1580 static int files_copy_ref(struct ref_store *ref_store,
1581 const char *oldrefname, const char *newrefname,
1582 const char *logmsg)
1583 {
1584 return files_copy_or_rename_ref(ref_store, oldrefname,
1585 newrefname, logmsg, 1);
1586 }
1587
1588 static int close_ref_gently(struct ref_lock *lock)
1589 {
1590 if (close_lock_file_gently(&lock->lk))
1591 return -1;
1592 return 0;
1593 }
1594
1595 static int commit_ref(struct ref_lock *lock)
1596 {
1597 char *path = get_locked_file_path(&lock->lk);
1598 struct stat st;
1599
1600 if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
1601 /*
1602 * There is a directory at the path we want to rename
1603 * the lockfile to. Hopefully it is empty; try to
1604 * delete it.
1605 */
1606 size_t len = strlen(path);
1607 struct strbuf sb_path = STRBUF_INIT;
1608
1609 strbuf_attach(&sb_path, path, len, len);
1610
1611 /*
1612 * If this fails, commit_lock_file() will also fail
1613 * and will report the problem.
1614 */
1615 remove_empty_directories(&sb_path);
1616 strbuf_release(&sb_path);
1617 } else {
1618 free(path);
1619 }
1620
1621 if (commit_lock_file(&lock->lk))
1622 return -1;
1623 return 0;
1624 }
1625
1626 static int open_or_create_logfile(const char *path, void *cb)
1627 {
1628 int *fd = cb;
1629
1630 *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
1631 return (*fd < 0) ? -1 : 0;
1632 }
1633
1634 /*
1635 * Create a reflog for a ref. If force_create = 0, only create the
1636 * reflog for certain refs (those for which should_autocreate_reflog
1637 * returns non-zero). Otherwise, create it regardless of the reference
1638 * name. If the logfile already existed or was created, return 0 and
1639 * set *logfd to the file descriptor opened for appending to the file.
1640 * If no logfile exists and we decided not to create one, return 0 and
1641 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
1642 * return -1.
1643 */
1644 static int log_ref_setup(struct files_ref_store *refs,
1645 const char *refname, int force_create,
1646 int *logfd, struct strbuf *err)
1647 {
1648 struct strbuf logfile_sb = STRBUF_INIT;
1649 char *logfile;
1650
1651 files_reflog_path(refs, &logfile_sb, refname);
1652 logfile = strbuf_detach(&logfile_sb, NULL);
1653
1654 if (force_create || should_autocreate_reflog(refname)) {
1655 if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
1656 if (errno == ENOENT)
1657 strbuf_addf(err, "unable to create directory for '%s': "
1658 "%s", logfile, strerror(errno));
1659 else if (errno == EISDIR)
1660 strbuf_addf(err, "there are still logs under '%s'",
1661 logfile);
1662 else
1663 strbuf_addf(err, "unable to append to '%s': %s",
1664 logfile, strerror(errno));
1665
1666 goto error;
1667 }
1668 } else {
1669 *logfd = open(logfile, O_APPEND | O_WRONLY);
1670 if (*logfd < 0) {
1671 if (errno == ENOENT || errno == EISDIR) {
1672 /*
1673 * The logfile doesn't already exist,
1674 * but that is not an error; it only
1675 * means that we won't write log
1676 * entries to it.
1677 */
1678 ;
1679 } else {
1680 strbuf_addf(err, "unable to append to '%s': %s",
1681 logfile, strerror(errno));
1682 goto error;
1683 }
1684 }
1685 }
1686
1687 if (*logfd >= 0)
1688 adjust_shared_perm(logfile);
1689
1690 free(logfile);
1691 return 0;
1692
1693 error:
1694 free(logfile);
1695 return -1;
1696 }
1697
1698 static int files_create_reflog(struct ref_store *ref_store, const char *refname,
1699 struct strbuf *err)
1700 {
1701 struct files_ref_store *refs =
1702 files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1703 int fd;
1704
1705 if (log_ref_setup(refs, refname, 1, &fd, err))
1706 return -1;
1707
1708 if (fd >= 0)
1709 close(fd);
1710
1711 return 0;
1712 }
1713
1714 static int log_ref_write_fd(int fd, const struct object_id *old_oid,
1715 const struct object_id *new_oid,
1716 const char *committer, const char *msg)
1717 {
1718 struct strbuf sb = STRBUF_INIT;
1719 int ret = 0;
1720
1721 strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer);
1722 if (msg && *msg) {
1723 strbuf_addch(&sb, '\t');
1724 strbuf_addstr(&sb, msg);
1725 }
1726 strbuf_addch(&sb, '\n');
1727 if (write_in_full(fd, sb.buf, sb.len) < 0)
1728 ret = -1;
1729 strbuf_release(&sb);
1730 return ret;
1731 }
1732
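/*
 * For illustration, each reflog entry written above has the form
 *
 *   <old-oid> SP <new-oid> SP <committer ident> [TAB <message>] LF
 *
 * for example (with a hypothetical ident and message):
 *
 *   1111...111 2222...222 A U Thor <author@example.com> 1700000000 +0000\tcommit: tweak\n
 *
 * which matches the "old SP new SP name <email> SP time TAB msg LF"
 * layout parsed by show_one_reflog_ent() below; the TAB and message
 * are omitted when the message is empty.
 */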
1733 static int files_log_ref_write(struct files_ref_store *refs,
1734 const char *refname, const struct object_id *old_oid,
1735 const struct object_id *new_oid, const char *msg,
1736 int flags, struct strbuf *err)
1737 {
1738 int logfd, result;
1739
1740 if (log_all_ref_updates == LOG_REFS_UNSET)
1741 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
1742
1743 result = log_ref_setup(refs, refname,
1744 flags & REF_FORCE_CREATE_REFLOG,
1745 &logfd, err);
1746
1747 if (result)
1748 return result;
1749
1750 if (logfd < 0)
1751 return 0;
1752 result = log_ref_write_fd(logfd, old_oid, new_oid,
1753 git_committer_info(0), msg);
1754 if (result) {
1755 struct strbuf sb = STRBUF_INIT;
1756 int save_errno = errno;
1757
1758 files_reflog_path(refs, &sb, refname);
1759 strbuf_addf(err, "unable to append to '%s': %s",
1760 sb.buf, strerror(save_errno));
1761 strbuf_release(&sb);
1762 close(logfd);
1763 return -1;
1764 }
1765 if (close(logfd)) {
1766 struct strbuf sb = STRBUF_INIT;
1767 int save_errno = errno;
1768
1769 files_reflog_path(refs, &sb, refname);
1770 strbuf_addf(err, "unable to append to '%s': %s",
1771 sb.buf, strerror(save_errno));
1772 strbuf_release(&sb);
1773 return -1;
1774 }
1775 return 0;
1776 }
1777
1778 /*
1779 * Write oid into the open lockfile, then close the lockfile. On
1780 * errors, rollback the lockfile, fill in *err and return -1.
1781 */
1782 static int write_ref_to_lockfile(struct ref_lock *lock,
1783 const struct object_id *oid,
1784 int skip_oid_verification, struct strbuf *err)
1785 {
1786 static char term = '\n';
1787 struct object *o;
1788 int fd;
1789
1790 if (!skip_oid_verification) {
1791 o = parse_object(the_repository, oid);
1792 if (!o) {
1793 strbuf_addf(
1794 err,
1795 "trying to write ref '%s' with nonexistent object %s",
1796 lock->ref_name, oid_to_hex(oid));
1797 unlock_ref(lock);
1798 return -1;
1799 }
1800 if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
1801 strbuf_addf(
1802 err,
1803 "trying to write non-commit object %s to branch '%s'",
1804 oid_to_hex(oid), lock->ref_name);
1805 unlock_ref(lock);
1806 return -1;
1807 }
1808 }
1809 fd = get_lock_file_fd(&lock->lk);
1810 if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
1811 write_in_full(fd, &term, 1) < 0 ||
1812 fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 ||
1813 close_ref_gently(lock) < 0) {
1814 strbuf_addf(err,
1815 "couldn't write '%s'", get_lock_file_path(&lock->lk));
1816 unlock_ref(lock);
1817 return -1;
1818 }
1819 return 0;
1820 }
1821
1822 /*
1823 * Commit a change to a loose reference that has already been written
1824 * to the loose reference lockfile. Also update the reflogs if
1825 * necessary, using the specified lockmsg (which can be NULL).
1826 */
1827 static int commit_ref_update(struct files_ref_store *refs,
1828 struct ref_lock *lock,
1829 const struct object_id *oid, const char *logmsg,
1830 struct strbuf *err)
1831 {
1832 files_assert_main_repository(refs, "commit_ref_update");
1833
1834 clear_loose_ref_cache(refs);
1835 if (files_log_ref_write(refs, lock->ref_name,
1836 &lock->old_oid, oid,
1837 logmsg, 0, err)) {
1838 char *old_msg = strbuf_detach(err, NULL);
1839 strbuf_addf(err, "cannot update the ref '%s': %s",
1840 lock->ref_name, old_msg);
1841 free(old_msg);
1842 unlock_ref(lock);
1843 return -1;
1844 }
1845
1846 if (strcmp(lock->ref_name, "HEAD") != 0) {
1847 /*
1848 * Special hack: If a branch is updated directly and HEAD
1849 * points to it (may happen on the remote side of a push
1850 * for example) then logically the HEAD reflog should be
1851 * updated too.
1852 * A generic solution implies reverse symref information,
1853 * but finding all symrefs pointing to the given branch
1854 * would be rather costly for this rare event (the direct
1855 * update of a branch) to be worth it. So let's cheat and
1856 * check with HEAD only which should cover 99% of all usage
1857 * scenarios (even 100% of the default ones).
1858 */
1859 int head_flag;
1860 const char *head_ref;
1861
1862 head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
1863 RESOLVE_REF_READING,
1864 NULL, &head_flag);
1865 if (head_ref && (head_flag & REF_ISSYMREF) &&
1866 !strcmp(head_ref, lock->ref_name)) {
1867 struct strbuf log_err = STRBUF_INIT;
1868 if (files_log_ref_write(refs, "HEAD",
1869 &lock->old_oid, oid,
1870 logmsg, 0, &log_err)) {
1871 error("%s", log_err.buf);
1872 strbuf_release(&log_err);
1873 }
1874 }
1875 }
1876
1877 if (commit_ref(lock)) {
1878 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
1879 unlock_ref(lock);
1880 return -1;
1881 }
1882
1883 unlock_ref(lock);
1884 return 0;
1885 }
1886
1887 static int create_ref_symlink(struct ref_lock *lock, const char *target)
1888 {
1889 int ret = -1;
1890 #ifndef NO_SYMLINK_HEAD
1891 char *ref_path = get_locked_file_path(&lock->lk);
1892 unlink(ref_path);
1893 ret = symlink(target, ref_path);
1894 free(ref_path);
1895
1896 if (ret)
1897 fprintf(stderr, "no symlink - falling back to symbolic ref\n");
1898 #endif
1899 return ret;
1900 }
1901
1902 static void update_symref_reflog(struct files_ref_store *refs,
1903 struct ref_lock *lock, const char *refname,
1904 const char *target, const char *logmsg)
1905 {
1906 struct strbuf err = STRBUF_INIT;
1907 struct object_id new_oid;
1908
1909 if (logmsg &&
1910 refs_resolve_ref_unsafe(&refs->base, target,
1911 RESOLVE_REF_READING, &new_oid, NULL) &&
1912 files_log_ref_write(refs, refname, &lock->old_oid,
1913 &new_oid, logmsg, 0, &err)) {
1914 error("%s", err.buf);
1915 strbuf_release(&err);
1916 }
1917 }
1918
1919 static int create_symref_locked(struct files_ref_store *refs,
1920 struct ref_lock *lock, const char *refname,
1921 const char *target, const char *logmsg)
1922 {
1923 if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
1924 update_symref_reflog(refs, lock, refname, target, logmsg);
1925 return 0;
1926 }
1927
1928 if (!fdopen_lock_file(&lock->lk, "w"))
1929 return error("unable to fdopen %s: %s",
1930 get_lock_file_path(&lock->lk), strerror(errno));
1931
1932 update_symref_reflog(refs, lock, refname, target, logmsg);
1933
1934 /* no error check; commit_ref will check ferror */
1935 fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target);
1936 if (commit_ref(lock) < 0)
1937 return error("unable to write symref for %s: %s", refname,
1938 strerror(errno));
1939 return 0;
1940 }
1941
1942 static int files_create_symref(struct ref_store *ref_store,
1943 const char *refname, const char *target,
1944 const char *logmsg)
1945 {
1946 struct files_ref_store *refs =
1947 files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1948 struct strbuf err = STRBUF_INIT;
1949 struct ref_lock *lock;
1950 int ret;
1951
1952 lock = lock_ref_oid_basic(refs, refname, &err);
1953 if (!lock) {
1954 error("%s", err.buf);
1955 strbuf_release(&err);
1956 return -1;
1957 }
1958
1959 ret = create_symref_locked(refs, lock, refname, target, logmsg);
1960 unlock_ref(lock);
1961 return ret;
1962 }
1963
1964 static int files_reflog_exists(struct ref_store *ref_store,
1965 const char *refname)
1966 {
1967 struct files_ref_store *refs =
1968 files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1969 struct strbuf sb = STRBUF_INIT;
1970 struct stat st;
1971 int ret;
1972
1973 files_reflog_path(refs, &sb, refname);
1974 ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
1975 strbuf_release(&sb);
1976 return ret;
1977 }
1978
1979 static int files_delete_reflog(struct ref_store *ref_store,
1980 const char *refname)
1981 {
1982 struct files_ref_store *refs =
1983 files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1984 struct strbuf sb = STRBUF_INIT;
1985 int ret;
1986
1987 files_reflog_path(refs, &sb, refname);
1988 ret = remove_path(sb.buf);
1989 strbuf_release(&sb);
1990 return ret;
1991 }
1992
1993 static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
1994 {
1995 struct object_id ooid, noid;
1996 char *email_end, *message;
1997 timestamp_t timestamp;
1998 int tz;
1999 const char *p = sb->buf;
2000
2001 /* old SP new SP name <email> SP time TAB msg LF */
2002 if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
2003 parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
2004 parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
2005 !(email_end = strchr(p, '>')) ||
2006 email_end[1] != ' ' ||
2007 !(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
2008 !message || message[0] != ' ' ||
2009 (message[1] != '+' && message[1] != '-') ||
2010 !isdigit(message[2]) || !isdigit(message[3]) ||
2011 !isdigit(message[4]) || !isdigit(message[5]))
2012 return 0; /* corrupt? */
2013 email_end[1] = '\0';
2014 tz = strtol(message + 1, NULL, 10);
2015 if (message[6] != '\t')
2016 message += 6;
2017 else
2018 message += 7;
2019 return fn(&ooid, &noid, p, timestamp, tz, message, cb_data);
2020 }
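/*
 * A concrete reflog line in the format parsed above (hashes and
 * identity invented for the example):
 *
 *   <old-oid> <new-oid> A U Thor <author@example.com> 1700000000 +0200\tcommit: tweak\n
 *
 * i.e. old OID, SP, new OID, SP, "name <email>", SP, seconds since
 * the epoch, SP, signed four-digit timezone, TAB, message, LF.  A
 * minimal sketch of a matching writer; it mirrors the format string
 * used by expire_reflog_ent() further down, except that msg here is
 * assumed not to carry its own trailing LF:
 */
static void example_format_reflog_ent(FILE *out, const char *old_hex,
				      const char *new_hex, const char *ident,
				      timestamp_t timestamp, int tz,
				      const char *msg)
{
	fprintf(out, "%s %s %s %"PRItime" %+05d\t%s\n",
		old_hex, new_hex, ident, timestamp, tz, msg);
}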
2021
2022 static char *find_beginning_of_line(char *bob, char *scan)
2023 {
2024 while (bob < scan && *(--scan) != '\n')
2025 ; /* keep scanning backwards */
2026 /*
2027 * Return either the beginning of the buffer, or the LF at the
2028 * end of the previous line.
2029 */
2030 return scan;
2031 }
2032
2033 static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
2034 const char *refname,
2035 each_reflog_ent_fn fn,
2036 void *cb_data)
2037 {
2038 struct files_ref_store *refs =
2039 files_downcast(ref_store, REF_STORE_READ,
2040 "for_each_reflog_ent_reverse");
2041 struct strbuf sb = STRBUF_INIT;
2042 FILE *logfp;
2043 long pos;
2044 int ret = 0, at_tail = 1;
2045
2046 files_reflog_path(refs, &sb, refname);
2047 logfp = fopen(sb.buf, "r");
2048 strbuf_release(&sb);
2049 if (!logfp)
2050 return -1;
2051
2052 /* Jump to the end */
2053 if (fseek(logfp, 0, SEEK_END) < 0)
2054 ret = error("cannot seek back reflog for %s: %s",
2055 refname, strerror(errno));
2056 pos = ftell(logfp);
2057 while (!ret && 0 < pos) {
2058 int cnt;
2059 size_t nread;
2060 char buf[BUFSIZ];
2061 char *endp, *scanp;
2062
2063 /* Fill next block from the end */
2064 cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
2065 if (fseek(logfp, pos - cnt, SEEK_SET)) {
2066 ret = error("cannot seek back reflog for %s: %s",
2067 refname, strerror(errno));
2068 break;
2069 }
2070 nread = fread(buf, cnt, 1, logfp);
2071 if (nread != 1) {
2072 ret = error("cannot read %d bytes from reflog for %s: %s",
2073 cnt, refname, strerror(errno));
2074 break;
2075 }
2076 pos -= cnt;
2077
2078 scanp = endp = buf + cnt;
2079 if (at_tail && scanp[-1] == '\n')
2080 /* Looking at the final LF at the end of the file */
2081 scanp--;
2082 at_tail = 0;
2083
2084 while (buf < scanp) {
2085 /*
2086 * terminating LF of the previous line, or the beginning
2087 * of the buffer.
2088 */
2089 char *bp;
2090
2091 bp = find_beginning_of_line(buf, scanp);
2092
2093 if (*bp == '\n') {
2094 /*
2095 * The newline is the end of the previous line,
2096 * so we know we have a complete line starting
2097 * at (bp + 1). Prefix it onto any prior data
2098 * we collected for the line and process it.
2099 */
2100 strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
2101 scanp = bp;
2102 endp = bp + 1;
2103 ret = show_one_reflog_ent(&sb, fn, cb_data);
2104 strbuf_reset(&sb);
2105 if (ret)
2106 break;
2107 } else if (!pos) {
2108 /*
2109 * We are at the start of the buffer, and the
2110 * start of the file; there is no previous
2111 * line, and we have everything for this one.
2112 * Process it, and we can end the loop.
2113 */
2114 strbuf_splice(&sb, 0, 0, buf, endp - buf);
2115 ret = show_one_reflog_ent(&sb, fn, cb_data);
2116 strbuf_reset(&sb);
2117 break;
2118 }
2119
2120 if (bp == buf) {
2121 /*
2122 * We are at the start of the buffer, and there
2123 * is more file to read backwards. Which means
2124 * we are in the middle of a line. Note that we
2125 * may get here even if *bp was a newline; that
2126 * just means we are at the exact end of the
2127 * previous line, rather than some spot in the
2128 * middle.
2129 *
2130 * Save away what we have to be combined with
2131 * the data from the next read.
2132 */
2133 strbuf_splice(&sb, 0, 0, buf, endp - buf);
2134 break;
2135 }
2136 }
2137
2138 }
2139 if (!ret && sb.len)
2140 BUG("reverse reflog parser had leftover data");
2141
2142 fclose(logfp);
2143 strbuf_release(&sb);
2144 return ret;
2145 }
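/*
 * The loop above is a block-wise version of a simple idea: keep an
 * end pointer one past the current line's LF, scan backwards to the
 * previous LF (or to the start of the buffer), emit the line, and
 * repeat.  A minimal in-memory sketch without the block refilling and
 * partial-line splicing (the helper name is hypothetical; the buffer
 * is assumed to end with '\n'):
 */
static void example_print_lines_backwards(const char *buf, size_t len)
{
	const char *endp = buf + len;	/* one past the current line's LF */

	while (endp > buf) {
		const char *scan = endp - 1;	/* the LF ending this line */

		/* back up to just past the previous line's LF, or to buf */
		while (buf < scan && scan[-1] != '\n')
			scan--;

		printf("%.*s", (int)(endp - scan), scan);
		endp = scan;
	}
}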
2146
2147 static int files_for_each_reflog_ent(struct ref_store *ref_store,
2148 const char *refname,
2149 each_reflog_ent_fn fn, void *cb_data)
2150 {
2151 struct files_ref_store *refs =
2152 files_downcast(ref_store, REF_STORE_READ,
2153 "for_each_reflog_ent");
2154 FILE *logfp;
2155 struct strbuf sb = STRBUF_INIT;
2156 int ret = 0;
2157
2158 files_reflog_path(refs, &sb, refname);
2159 logfp = fopen(sb.buf, "r");
2160 strbuf_release(&sb);
2161 if (!logfp)
2162 return -1;
2163
2164 while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
2165 ret = show_one_reflog_ent(&sb, fn, cb_data);
2166 fclose(logfp);
2167 strbuf_release(&sb);
2168 return ret;
2169 }
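/*
 * Both iteration functions above feed each parsed entry to an
 * each_reflog_ent_fn callback; it would be passed as the `fn`
 * argument together with a cb_data pointer.  A minimal sketch of such
 * a callback (the name and its counting purpose are invented; the
 * signature matches expire_reflog_ent() below):
 */
static int example_count_reflog_ent(struct object_id *ooid,
				    struct object_id *noid,
				    const char *email,
				    timestamp_t timestamp, int tz,
				    const char *message, void *cb_data)
{
	unsigned long *count = cb_data;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}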
2170
2171 struct files_reflog_iterator {
2172 struct ref_iterator base;
2173
2174 struct ref_store *ref_store;
2175 struct dir_iterator *dir_iterator;
2176 struct object_id oid;
2177 };
2178
2179 static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
2180 {
2181 struct files_reflog_iterator *iter =
2182 (struct files_reflog_iterator *)ref_iterator;
2183 struct dir_iterator *diter = iter->dir_iterator;
2184 int ok;
2185
2186 while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
2187 int flags;
2188
2189 if (!S_ISREG(diter->st.st_mode))
2190 continue;
2191 if (diter->basename[0] == '.')
2192 continue;
2193 if (ends_with(diter->basename, ".lock"))
2194 continue;
2195
2196 if (!refs_resolve_ref_unsafe(iter->ref_store,
2197 diter->relative_path, 0,
2198 &iter->oid, &flags)) {
2199 error("bad ref for %s", diter->path.buf);
2200 continue;
2201 }
2202
2203 iter->base.refname = diter->relative_path;
2204 iter->base.oid = &iter->oid;
2205 iter->base.flags = flags;
2206 return ITER_OK;
2207 }
2208
2209 iter->dir_iterator = NULL;
2210 if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
2211 ok = ITER_ERROR;
2212 return ok;
2213 }
2214
2215 static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
2216 struct object_id *peeled UNUSED)
2217 {
2218 BUG("ref_iterator_peel() called for reflog_iterator");
2219 }
2220
2221 static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
2222 {
2223 struct files_reflog_iterator *iter =
2224 (struct files_reflog_iterator *)ref_iterator;
2225 int ok = ITER_DONE;
2226
2227 if (iter->dir_iterator)
2228 ok = dir_iterator_abort(iter->dir_iterator);
2229
2230 base_ref_iterator_free(ref_iterator);
2231 return ok;
2232 }
2233
2234 static struct ref_iterator_vtable files_reflog_iterator_vtable = {
2235 .advance = files_reflog_iterator_advance,
2236 .peel = files_reflog_iterator_peel,
2237 .abort = files_reflog_iterator_abort,
2238 };
2239
2240 static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
2241 const char *gitdir)
2242 {
2243 struct dir_iterator *diter;
2244 struct files_reflog_iterator *iter;
2245 struct ref_iterator *ref_iterator;
2246 struct strbuf sb = STRBUF_INIT;
2247
2248 strbuf_addf(&sb, "%s/logs", gitdir);
2249
2250 diter = dir_iterator_begin(sb.buf, 0);
2251 if (!diter) {
2252 strbuf_release(&sb);
2253 return empty_ref_iterator_begin();
2254 }
2255
2256 CALLOC_ARRAY(iter, 1);
2257 ref_iterator = &iter->base;
2258
2259 base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
2260 iter->dir_iterator = diter;
2261 iter->ref_store = ref_store;
2262 strbuf_release(&sb);
2263
2264 return ref_iterator;
2265 }
2266
2267 static enum iterator_selection reflog_iterator_select(
2268 struct ref_iterator *iter_worktree,
2269 struct ref_iterator *iter_common,
2270 void *cb_data UNUSED)
2271 {
2272 if (iter_worktree) {
2273 /*
2274 * We're a bit loose here. We probably should ignore
2275 * common refs if they are accidentally added as
2276 * per-worktree refs.
2277 */
2278 return ITER_SELECT_0;
2279 } else if (iter_common) {
2280 if (parse_worktree_ref(iter_common->refname, NULL, NULL,
2281 NULL) == REF_WORKTREE_SHARED)
2282 return ITER_SELECT_1;
2283
2284 /*
2285 * The main ref store may contain the main worktree's
2286 * per-worktree refs, which should be ignored.
2287 */
2288 return ITER_SKIP_1;
2289 } else
2290 return ITER_DONE;
2291 }
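/*
 * In other words: per-worktree reflogs (e.g. for HEAD) win over
 * entries of the same name from the common dir, and common-dir
 * entries are kept only for shared refnames.  A tiny sketch of the
 * "is this name shared?" test, using the same call as above (the
 * helper name is hypothetical):
 */
static int example_refname_is_shared(const char *refname)
{
	return parse_worktree_ref(refname, NULL, NULL, NULL) ==
	       REF_WORKTREE_SHARED;
}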
2292
2293 static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
2294 {
2295 struct files_ref_store *refs =
2296 files_downcast(ref_store, REF_STORE_READ,
2297 "reflog_iterator_begin");
2298
2299 if (!strcmp(refs->base.gitdir, refs->gitcommondir)) {
2300 return reflog_iterator_begin(ref_store, refs->gitcommondir);
2301 } else {
2302 return merge_ref_iterator_begin(
2303 0, reflog_iterator_begin(ref_store, refs->base.gitdir),
2304 reflog_iterator_begin(ref_store, refs->gitcommondir),
2305 reflog_iterator_select, refs);
2306 }
2307 }
2308
2309 /*
2310 * If update is a direct update of head_ref (the reference pointed to
2311 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
2312 */
2313 static int split_head_update(struct ref_update *update,
2314 struct ref_transaction *transaction,
2315 const char *head_ref,
2316 struct string_list *affected_refnames,
2317 struct strbuf *err)
2318 {
2319 struct string_list_item *item;
2320 struct ref_update *new_update;
2321
2322 if ((update->flags & REF_LOG_ONLY) ||
2323 (update->flags & REF_IS_PRUNING) ||
2324 (update->flags & REF_UPDATE_VIA_HEAD))
2325 return 0;
2326
2327 if (strcmp(update->refname, head_ref))
2328 return 0;
2329
2330 /*
2331 * First make sure that HEAD is not already in the
2332 * transaction. This check is O(lg N) in the transaction
2333 * size, but it happens at most once per transaction.
2334 */
2335 if (string_list_has_string(affected_refnames, "HEAD")) {
2336 /* An entry already exists */
2337 strbuf_addf(err,
2338 "multiple updates for 'HEAD' (including one "
2339 "via its referent '%s') are not allowed",
2340 update->refname);
2341 return TRANSACTION_NAME_CONFLICT;
2342 }
2343
2344 new_update = ref_transaction_add_update(
2345 transaction, "HEAD",
2346 update->flags | REF_LOG_ONLY | REF_NO_DEREF,
2347 &update->new_oid, &update->old_oid,
2348 update->msg);
2349
2350 /*
2351 * Add "HEAD". This insertion is O(N) in the transaction
2352 * size, but it happens at most once per transaction.
2353 * Add new_update->refname instead of a literal "HEAD".
2354 */
2355 if (strcmp(new_update->refname, "HEAD"))
2356 BUG("%s unexpectedly not 'HEAD'", new_update->refname);
2357 item = string_list_insert(affected_refnames, new_update->refname);
2358 item->util = new_update;
2359
2360 return 0;
2361 }
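/*
 * Worked example of the split above (values invented): with HEAD ->
 * refs/heads/main, a transaction containing
 *
 *   update refs/heads/main  old=A new=B
 *
 * gains one extra entry and becomes
 *
 *   update refs/heads/main  old=A new=B
 *   update HEAD             old=A new=B  (REF_LOG_ONLY | REF_NO_DEREF)
 *
 * so HEAD's reflog records the move, but HEAD itself is not rewritten.
 */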
2362
2363 /*
2364 * update is for a symref that points at referent and doesn't have
2365 * REF_NO_DEREF set. Split it into two updates:
2366 * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set
2367 * - A new, separate update for the referent reference
2368 * Note that the new update will itself be subject to splitting when
2369 * the iteration gets to it.
2370 */
2371 static int split_symref_update(struct ref_update *update,
2372 const char *referent,
2373 struct ref_transaction *transaction,
2374 struct string_list *affected_refnames,
2375 struct strbuf *err)
2376 {
2377 struct string_list_item *item;
2378 struct ref_update *new_update;
2379 unsigned int new_flags;
2380
2381 /*
2382 * First make sure that referent is not already in the
2383 * transaction. This check is O(lg N) in the transaction
2384 * size, but it happens at most once per symref in a
2385 * transaction.
2386 */
2387 if (string_list_has_string(affected_refnames, referent)) {
2388 /* An entry already exists */
2389 strbuf_addf(err,
2390 "multiple updates for '%s' (including one "
2391 "via symref '%s') are not allowed",
2392 referent, update->refname);
2393 return TRANSACTION_NAME_CONFLICT;
2394 }
2395
2396 new_flags = update->flags;
2397 if (!strcmp(update->refname, "HEAD")) {
2398 /*
2399 * Record that the new update came via HEAD, so that
2400 * when we process it, split_head_update() doesn't try
2401 * to add another reflog update for HEAD. Note that
2402 * this bit will be propagated if the new_update
2403 * itself needs to be split.
2404 */
2405 new_flags |= REF_UPDATE_VIA_HEAD;
2406 }
2407
2408 new_update = ref_transaction_add_update(
2409 transaction, referent, new_flags,
2410 &update->new_oid, &update->old_oid,
2411 update->msg);
2412
2413 new_update->parent_update = update;
2414
2415 /*
2416 * Change the symbolic ref update to log only. Also, it
2417 * doesn't need to check its old OID value, as that will be
2418 * done when new_update is processed.
2419 */
2420 update->flags |= REF_LOG_ONLY | REF_NO_DEREF;
2421 update->flags &= ~REF_HAVE_OLD;
2422
2423 /*
2424 * Add the referent. This insertion is O(N) in the transaction
2425 * size, but it happens at most once per symref in a
2426 * transaction. Make sure to add new_update->refname, which will
2427 * be valid as long as affected_refnames is in use, and NOT
2428 * referent, which might soon be freed by our caller.
2429 */
2430 item = string_list_insert(affected_refnames, new_update->refname);
2431 if (item->util)
2432 BUG("%s unexpectedly found in affected_refnames",
2433 new_update->refname);
2434 item->util = new_update;
2435
2436 return 0;
2437 }
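/*
 * Worked example of the split above (values invented): with HEAD ->
 * refs/heads/main and REF_NO_DEREF unset, a request
 *
 *   update HEAD  old=A new=B
 *
 * is rewritten into
 *
 *   update HEAD             new=B  (REF_LOG_ONLY | REF_NO_DEREF, old check dropped)
 *   update refs/heads/main  old=A new=B  (REF_UPDATE_VIA_HEAD)
 *
 * i.e. the loose file refs/heads/main is what actually changes, HEAD
 * only gets a reflog entry, and the old-value check moves to the
 * referent.
 */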
2438
2439 /*
2440 * Return the refname under which update was originally requested.
2441 */
2442 static const char *original_update_refname(struct ref_update *update)
2443 {
2444 while (update->parent_update)
2445 update = update->parent_update;
2446
2447 return update->refname;
2448 }
2449
2450 /*
2451 * Check whether the REF_HAVE_OLD and old_oid values stored in update
2452 * are consistent with oid, which is the reference's current value. If
2453 * everything is OK, return 0; otherwise, write an error message to
2454 * err and return -1.
2455 */
2456 static int check_old_oid(struct ref_update *update, struct object_id *oid,
2457 struct strbuf *err)
2458 {
2459 if (!(update->flags & REF_HAVE_OLD) ||
2460 oideq(oid, &update->old_oid))
2461 return 0;
2462
2463 if (is_null_oid(&update->old_oid))
2464 strbuf_addf(err, "cannot lock ref '%s': "
2465 "reference already exists",
2466 original_update_refname(update));
2467 else if (is_null_oid(oid))
2468 strbuf_addf(err, "cannot lock ref '%s': "
2469 "reference is missing but expected %s",
2470 original_update_refname(update),
2471 oid_to_hex(&update->old_oid));
2472 else
2473 strbuf_addf(err, "cannot lock ref '%s': "
2474 "is at %s but expected %s",
2475 original_update_refname(update),
2476 oid_to_hex(oid),
2477 oid_to_hex(&update->old_oid));
2478
2479 return -1;
2480 }
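/*
 * The cases above, spelled out (oid is the reference's current value):
 *
 *   REF_HAVE_OLD unset                      -> ok, no precondition
 *   old_oid == oid                          -> ok
 *   old_oid == null, oid != null            -> "reference already exists"
 *   old_oid != null, oid == null            -> "reference is missing but expected <old>"
 *   old_oid != null, oid != null, mismatch  -> "is at <oid> but expected <old>"
 */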
2481
2482 /*
2483 * Prepare for carrying out update:
2484 * - Lock the reference referred to by update.
2485 * - Read the reference under lock.
2486 * - Check that its old OID value (if specified) is correct, and in
2487 * any case record it in update->lock->old_oid for later use when
2488 * writing the reflog.
2489 * - If it is a symref update without REF_NO_DEREF, split it up into a
2490 * REF_LOG_ONLY update of the symref and add a separate update for
2491 * the referent to transaction.
2492 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2493 * update of HEAD.
2494 */
2495 static int lock_ref_for_update(struct files_ref_store *refs,
2496 struct ref_update *update,
2497 struct ref_transaction *transaction,
2498 const char *head_ref,
2499 struct string_list *affected_refnames,
2500 struct strbuf *err)
2501 {
2502 struct strbuf referent = STRBUF_INIT;
2503 int mustexist = (update->flags & REF_HAVE_OLD) &&
2504 !is_null_oid(&update->old_oid);
2505 int ret = 0;
2506 struct ref_lock *lock;
2507
2508 files_assert_main_repository(refs, "lock_ref_for_update");
2509
2510 if ((update->flags & REF_HAVE_NEW) && is_null_oid(&update->new_oid))
2511 update->flags |= REF_DELETING;
2512
2513 if (head_ref) {
2514 ret = split_head_update(update, transaction, head_ref,
2515 affected_refnames, err);
2516 if (ret)
2517 goto out;
2518 }
2519
2520 ret = lock_raw_ref(refs, update->refname, mustexist,
2521 affected_refnames,
2522 &lock, &referent,
2523 &update->type, err);
2524 if (ret) {
2525 char *reason;
2526
2527 reason = strbuf_detach(err, NULL);
2528 strbuf_addf(err, "cannot lock ref '%s': %s",
2529 original_update_refname(update), reason);
2530 free(reason);
2531 goto out;
2532 }
2533
2534 update->backend_data = lock;
2535
2536 if (update->type & REF_ISSYMREF) {
2537 if (update->flags & REF_NO_DEREF) {
2538 /*
2539 * We won't be reading the referent as part of
2540 * the transaction, so we have to read it here
2541 * to record and possibly check old_oid:
2542 */
2543 if (!refs_resolve_ref_unsafe(&refs->base,
2544 referent.buf, 0,
2545 &lock->old_oid, NULL)) {
2546 if (update->flags & REF_HAVE_OLD) {
2547 strbuf_addf(err, "cannot lock ref '%s': "
2548 "error reading reference",
2549 original_update_refname(update));
2550 ret = TRANSACTION_GENERIC_ERROR;
2551 goto out;
2552 }
2553 } else if (check_old_oid(update, &lock->old_oid, err)) {
2554 ret = TRANSACTION_GENERIC_ERROR;
2555 goto out;
2556 }
2557 } else {
2558 /*
2559 * Create a new update for the reference this
2560 * symref is pointing at. Also, we will record
2561 * and verify old_oid for this update as part
2562 * of processing the split-off update, so we
2563 * don't have to do it here.
2564 */
2565 ret = split_symref_update(update,
2566 referent.buf, transaction,
2567 affected_refnames, err);
2568 if (ret)
2569 goto out;
2570 }
2571 } else {
2572 struct ref_update *parent_update;
2573
2574 if (check_old_oid(update, &lock->old_oid, err)) {
2575 ret = TRANSACTION_GENERIC_ERROR;
2576 goto out;
2577 }
2578
2579 /*
2580 * If this update is happening indirectly because of a
2581 * symref update, record the old OID in the parent
2582 * update:
2583 */
2584 for (parent_update = update->parent_update;
2585 parent_update;
2586 parent_update = parent_update->parent_update) {
2587 struct ref_lock *parent_lock = parent_update->backend_data;
2588 oidcpy(&parent_lock->old_oid, &lock->old_oid);
2589 }
2590 }
2591
2592 if ((update->flags & REF_HAVE_NEW) &&
2593 !(update->flags & REF_DELETING) &&
2594 !(update->flags & REF_LOG_ONLY)) {
2595 if (!(update->type & REF_ISSYMREF) &&
2596 oideq(&lock->old_oid, &update->new_oid)) {
2597 /*
2598 * The reference already has the desired
2599 * value, so we don't need to write it.
2600 */
2601 } else if (write_ref_to_lockfile(
2602 lock, &update->new_oid,
2603 update->flags & REF_SKIP_OID_VERIFICATION,
2604 err)) {
2605 char *write_err = strbuf_detach(err, NULL);
2606
2607 /*
2608 * The lock was freed upon failure of
2609 * write_ref_to_lockfile():
2610 */
2611 update->backend_data = NULL;
2612 strbuf_addf(err,
2613 "cannot update ref '%s': %s",
2614 update->refname, write_err);
2615 free(write_err);
2616 ret = TRANSACTION_GENERIC_ERROR;
2617 goto out;
2618 } else {
2619 update->flags |= REF_NEEDS_COMMIT;
2620 }
2621 }
2622 if (!(update->flags & REF_NEEDS_COMMIT)) {
2623 /*
2624 * We didn't call write_ref_to_lockfile(), so
2625 * the lockfile is still open. Close it to
2626 * free up the file descriptor:
2627 */
2628 if (close_ref_gently(lock)) {
2629 strbuf_addf(err, "couldn't close '%s.lock'",
2630 update->refname);
2631 ret = TRANSACTION_GENERIC_ERROR;
2632 goto out;
2633 }
2634 }
2635
2636 out:
2637 strbuf_release(&referent);
2638 return ret;
2639 }
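/*
 * Worked example for the common case (a plain update of
 * refs/heads/main from A to B, no symref, not HEAD): lock_raw_ref()
 * takes refs/heads/main.lock and records A in lock->old_oid,
 * check_old_oid() verifies A against the requested old value,
 * write_ref_to_lockfile() writes B into the lockfile, and
 * REF_NEEDS_COMMIT is set so that files_transaction_finish() renames
 * the lockfile into place later.
 */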
2640
2641 struct files_transaction_backend_data {
2642 struct ref_transaction *packed_transaction;
2643 int packed_refs_locked;
2644 };
2645
2646 /*
2647 * Unlock any references in `transaction` that are still locked, and
2648 * mark the transaction closed.
2649 */
2650 static void files_transaction_cleanup(struct files_ref_store *refs,
2651 struct ref_transaction *transaction)
2652 {
2653 size_t i;
2654 struct files_transaction_backend_data *backend_data =
2655 transaction->backend_data;
2656 struct strbuf err = STRBUF_INIT;
2657
2658 for (i = 0; i < transaction->nr; i++) {
2659 struct ref_update *update = transaction->updates[i];
2660 struct ref_lock *lock = update->backend_data;
2661
2662 if (lock) {
2663 unlock_ref(lock);
2664 update->backend_data = NULL;
2665 }
2666 }
2667
2668 if (backend_data) {
2669 if (backend_data->packed_transaction &&
2670 ref_transaction_abort(backend_data->packed_transaction, &err)) {
2671 error("error aborting transaction: %s", err.buf);
2672 strbuf_release(&err);
2673 }
2674
2675 if (backend_data->packed_refs_locked)
2676 packed_refs_unlock(refs->packed_ref_store);
2677
2678 free(backend_data);
2679 }
2680
2681 transaction->state = REF_TRANSACTION_CLOSED;
2682 }
2683
2684 static int files_transaction_prepare(struct ref_store *ref_store,
2685 struct ref_transaction *transaction,
2686 struct strbuf *err)
2687 {
2688 struct files_ref_store *refs =
2689 files_downcast(ref_store, REF_STORE_WRITE,
2690 "ref_transaction_prepare");
2691 size_t i;
2692 int ret = 0;
2693 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2694 char *head_ref = NULL;
2695 int head_type;
2696 struct files_transaction_backend_data *backend_data;
2697 struct ref_transaction *packed_transaction = NULL;
2698
2699 assert(err);
2700
2701 if (!transaction->nr)
2702 goto cleanup;
2703
2704 CALLOC_ARRAY(backend_data, 1);
2705 transaction->backend_data = backend_data;
2706
2707 /*
2708 * Fail if a refname appears more than once in the
2709 * transaction. (If we end up splitting up any updates using
2710 * split_symref_update() or split_head_update(), those
2711 * functions will check that the new updates don't have the
2712 * same refname as any existing ones.) Also fail if any of the
2713 * updates use REF_IS_PRUNING without REF_NO_DEREF.
2714 */
2715 for (i = 0; i < transaction->nr; i++) {
2716 struct ref_update *update = transaction->updates[i];
2717 struct string_list_item *item =
2718 string_list_append(&affected_refnames, update->refname);
2719
2720 if ((update->flags & REF_IS_PRUNING) &&
2721 !(update->flags & REF_NO_DEREF))
2722 BUG("REF_IS_PRUNING set without REF_NO_DEREF");
2723
2724 /*
2725 * We store a pointer to update in item->util, but at
2726 * the moment we never use the value of this field
2727 * except to check whether it is non-NULL.
2728 */
2729 item->util = update;
2730 }
2731 string_list_sort(&affected_refnames);
2732 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2733 ret = TRANSACTION_GENERIC_ERROR;
2734 goto cleanup;
2735 }
2736
2737 /*
2738 * Special hack: If a branch is updated directly and HEAD
2739 * points to it (may happen on the remote side of a push
2740 * for example) then logically the HEAD reflog should be
2741 * updated too.
2742 *
2743 * A generic solution would require reverse symref lookups,
2744 * but finding all symrefs pointing to a given branch would be
2745 * rather costly for this rare event (the direct update of a
2746 * branch) to be worth it. So let's cheat and check with HEAD
2747 * only, which should cover 99% of all usage scenarios (even
2748 * 100% of the default ones).
2749 *
2750 * So if HEAD is a symbolic reference, then record the name of
2751 * the reference that it points to. If we see an update of
2752 * head_ref within the transaction, then split_head_update()
2753 * arranges for the reflog of HEAD to be updated, too.
2754 */
2755 head_ref = refs_resolve_refdup(ref_store, "HEAD",
2756 RESOLVE_REF_NO_RECURSE,
2757 NULL, &head_type);
2758
2759 if (head_ref && !(head_type & REF_ISSYMREF)) {
2760 FREE_AND_NULL(head_ref);
2761 }
2762
2763 /*
2764 * Acquire all locks, verify old values if provided, check
2765 * that new values are valid, and write new values to the
2766 * lockfiles, ready to be activated. Only keep one lockfile
2767 * open at a time to avoid running out of file descriptors.
2768 * Note that lock_ref_for_update() might append more updates
2769 * to the transaction.
2770 */
2771 for (i = 0; i < transaction->nr; i++) {
2772 struct ref_update *update = transaction->updates[i];
2773
2774 ret = lock_ref_for_update(refs, update, transaction,
2775 head_ref, &affected_refnames, err);
2776 if (ret)
2777 goto cleanup;
2778
2779 if (update->flags & REF_DELETING &&
2780 !(update->flags & REF_LOG_ONLY) &&
2781 !(update->flags & REF_IS_PRUNING)) {
2782 /*
2783 * This reference has to be deleted from
2784 * packed-refs if it exists there.
2785 */
2786 if (!packed_transaction) {
2787 packed_transaction = ref_store_transaction_begin(
2788 refs->packed_ref_store, err);
2789 if (!packed_transaction) {
2790 ret = TRANSACTION_GENERIC_ERROR;
2791 goto cleanup;
2792 }
2793
2794 backend_data->packed_transaction =
2795 packed_transaction;
2796 }
2797
2798 ref_transaction_add_update(
2799 packed_transaction, update->refname,
2800 REF_HAVE_NEW | REF_NO_DEREF,
2801 &update->new_oid, NULL,
2802 NULL);
2803 }
2804 }
2805
2806 if (packed_transaction) {
2807 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2808 ret = TRANSACTION_GENERIC_ERROR;
2809 goto cleanup;
2810 }
2811 backend_data->packed_refs_locked = 1;
2812
2813 if (is_packed_transaction_needed(refs->packed_ref_store,
2814 packed_transaction)) {
2815 ret = ref_transaction_prepare(packed_transaction, err);
2816 /*
2817 * A failure during the prepare step aborts the packed
2818 * transaction itself, but does not free it. Free it now,
2819 * and disconnect it from the files transaction so that
2820 * the cleanup code below does not try to abort it again.
2821 */
2822 if (ret) {
2823 ref_transaction_free(packed_transaction);
2824 backend_data->packed_transaction = NULL;
2825 }
2826 } else {
2827 /*
2828 * We can skip rewriting the `packed-refs`
2829 * file. But we do need to leave it locked, so
2830 * that somebody else doesn't pack a reference
2831 * that we are trying to delete.
2832 *
2833 * We need to disconnect our transaction from
2834 * backend_data, since the abort (whether successful or
2835 * not) will free it.
2836 */
2837 backend_data->packed_transaction = NULL;
2838 if (ref_transaction_abort(packed_transaction, err)) {
2839 ret = TRANSACTION_GENERIC_ERROR;
2840 goto cleanup;
2841 }
2842 }
2843 }
2844
2845 cleanup:
2846 free(head_ref);
2847 string_list_clear(&affected_refnames, 0);
2848
2849 if (ret)
2850 files_transaction_cleanup(refs, transaction);
2851 else
2852 transaction->state = REF_TRANSACTION_PREPARED;
2853
2854 return ret;
2855 }
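/*
 * The duplicate-refname check at the top of this function boils down
 * to "collect, sort, reject equal neighbours".  A minimal sketch of
 * that core pattern, using only helpers already used above (the
 * wrapper name is hypothetical):
 */
static int example_has_duplicate_refnames(struct ref_transaction *transaction,
					  struct strbuf *err)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	size_t i;
	int ret;

	for (i = 0; i < transaction->nr; i++)
		string_list_append(&names, transaction->updates[i]->refname);
	string_list_sort(&names);
	ret = ref_update_reject_duplicates(&names, err);
	string_list_clear(&names, 0);
	return ret;
}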
2856
2857 static int files_transaction_finish(struct ref_store *ref_store,
2858 struct ref_transaction *transaction,
2859 struct strbuf *err)
2860 {
2861 struct files_ref_store *refs =
2862 files_downcast(ref_store, 0, "ref_transaction_finish");
2863 size_t i;
2864 int ret = 0;
2865 struct strbuf sb = STRBUF_INIT;
2866 struct files_transaction_backend_data *backend_data;
2867 struct ref_transaction *packed_transaction;
2868
2869
2870 assert(err);
2871
2872 if (!transaction->nr) {
2873 transaction->state = REF_TRANSACTION_CLOSED;
2874 return 0;
2875 }
2876
2877 backend_data = transaction->backend_data;
2878 packed_transaction = backend_data->packed_transaction;
2879
2880 /* Perform updates first so live commits remain referenced */
2881 for (i = 0; i < transaction->nr; i++) {
2882 struct ref_update *update = transaction->updates[i];
2883 struct ref_lock *lock = update->backend_data;
2884
2885 if (update->flags & REF_NEEDS_COMMIT ||
2886 update->flags & REF_LOG_ONLY) {
2887 if (files_log_ref_write(refs,
2888 lock->ref_name,
2889 &lock->old_oid,
2890 &update->new_oid,
2891 update->msg, update->flags,
2892 err)) {
2893 char *old_msg = strbuf_detach(err, NULL);
2894
2895 strbuf_addf(err, "cannot update the ref '%s': %s",
2896 lock->ref_name, old_msg);
2897 free(old_msg);
2898 unlock_ref(lock);
2899 update->backend_data = NULL;
2900 ret = TRANSACTION_GENERIC_ERROR;
2901 goto cleanup;
2902 }
2903 }
2904 if (update->flags & REF_NEEDS_COMMIT) {
2905 clear_loose_ref_cache(refs);
2906 if (commit_ref(lock)) {
2907 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
2908 unlock_ref(lock);
2909 update->backend_data = NULL;
2910 ret = TRANSACTION_GENERIC_ERROR;
2911 goto cleanup;
2912 }
2913 }
2914 }
2915
2916 /*
2917 * Now that updates are safely completed, we can perform
2918 * deletes. First delete the reflogs of any references that
2919 * will be deleted, since (in the unexpected event of an
2920 * error) leaving a reference without a reflog is less bad
2921 * than leaving a reflog without a reference (the latter is a
2922 * mildly invalid repository state):
2923 */
2924 for (i = 0; i < transaction->nr; i++) {
2925 struct ref_update *update = transaction->updates[i];
2926 if (update->flags & REF_DELETING &&
2927 !(update->flags & REF_LOG_ONLY) &&
2928 !(update->flags & REF_IS_PRUNING)) {
2929 strbuf_reset(&sb);
2930 files_reflog_path(refs, &sb, update->refname);
2931 if (!unlink_or_warn(sb.buf))
2932 try_remove_empty_parents(refs, update->refname,
2933 REMOVE_EMPTY_PARENTS_REFLOG);
2934 }
2935 }
2936
2937 /*
2938 * Perform deletes now that updates are safely completed.
2939 *
2940 * First delete any packed versions of the references, while
2941 * retaining the packed-refs lock:
2942 */
2943 if (packed_transaction) {
2944 ret = ref_transaction_commit(packed_transaction, err);
2945 ref_transaction_free(packed_transaction);
2946 packed_transaction = NULL;
2947 backend_data->packed_transaction = NULL;
2948 if (ret)
2949 goto cleanup;
2950 }
2951
2952 /* Now delete the loose versions of the references: */
2953 for (i = 0; i < transaction->nr; i++) {
2954 struct ref_update *update = transaction->updates[i];
2955 struct ref_lock *lock = update->backend_data;
2956
2957 if (update->flags & REF_DELETING &&
2958 !(update->flags & REF_LOG_ONLY)) {
2959 update->flags |= REF_DELETED_RMDIR;
2960 if (!(update->type & REF_ISPACKED) ||
2961 update->type & REF_ISSYMREF) {
2962 /* It is a loose reference. */
2963 strbuf_reset(&sb);
2964 files_ref_path(refs, &sb, lock->ref_name);
2965 if (unlink_or_msg(sb.buf, err)) {
2966 ret = TRANSACTION_GENERIC_ERROR;
2967 goto cleanup;
2968 }
2969 }
2970 }
2971 }
2972
2973 clear_loose_ref_cache(refs);
2974
2975 cleanup:
2976 files_transaction_cleanup(refs, transaction);
2977
2978 for (i = 0; i < transaction->nr; i++) {
2979 struct ref_update *update = transaction->updates[i];
2980
2981 if (update->flags & REF_DELETED_RMDIR) {
2982 /*
2983 * The reference was deleted. Delete any
2984 * empty parent directories. (Note that this
2985 * can only work because we have already
2986 * removed the lockfile.)
2987 */
2988 try_remove_empty_parents(refs, update->refname,
2989 REMOVE_EMPTY_PARENTS_REF);
2990 }
2991 }
2992
2993 strbuf_release(&sb);
2994 return ret;
2995 }
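/*
 * To summarize the ordering that files_transaction_finish() enforces
 * for a mixed transaction: (1) reflog writes and lockfile commits for
 * creations and updates, so new values are referenced as early as
 * possible; (2) reflog deletion for refs being deleted; (3) deletion
 * of their packed versions via the packed transaction; (4) deletion
 * of their loose files; (5) removal of now-empty parent directories
 * once all locks have been released.
 */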
2996
2997 static int files_transaction_abort(struct ref_store *ref_store,
2998 struct ref_transaction *transaction,
2999 struct strbuf *err UNUSED)
3000 {
3001 struct files_ref_store *refs =
3002 files_downcast(ref_store, 0, "ref_transaction_abort");
3003
3004 files_transaction_cleanup(refs, transaction);
3005 return 0;
3006 }
3007
3008 static int ref_present(const char *refname,
3009 const struct object_id *oid UNUSED,
3010 int flags UNUSED,
3011 void *cb_data)
3012 {
3013 struct string_list *affected_refnames = cb_data;
3014
3015 return string_list_has_string(affected_refnames, refname);
3016 }
3017
3018 static int files_initial_transaction_commit(struct ref_store *ref_store,
3019 struct ref_transaction *transaction,
3020 struct strbuf *err)
3021 {
3022 struct files_ref_store *refs =
3023 files_downcast(ref_store, REF_STORE_WRITE,
3024 "initial_ref_transaction_commit");
3025 size_t i;
3026 int ret = 0;
3027 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
3028 struct ref_transaction *packed_transaction = NULL;
3029
3030 assert(err);
3031
3032 if (transaction->state != REF_TRANSACTION_OPEN)
3033 BUG("commit called for transaction that is not open");
3034
3035 /* Fail if a refname appears more than once in the transaction: */
3036 for (i = 0; i < transaction->nr; i++)
3037 string_list_append(&affected_refnames,
3038 transaction->updates[i]->refname);
3039 string_list_sort(&affected_refnames);
3040 if (ref_update_reject_duplicates(&affected_refnames, err)) {
3041 ret = TRANSACTION_GENERIC_ERROR;
3042 goto cleanup;
3043 }
3044
3045 /*
3046 * It's really undefined to call this function in an active
3047 * repository or when there are existing references: we are
3048 * only locking and changing packed-refs, so (1) any
3049 * simultaneous processes might try to change a reference at
3050 * the same time we do, and (2) any existing loose versions of
3051 * the references that we are setting would have precedence
3052 * over our values. But some remote helpers create the remote
3053 * "HEAD" and "master" branches before calling this function,
3054 * so here we really only check that none of the references
3055 * that we are creating already exists.
3056 */
3057 if (refs_for_each_rawref(&refs->base, ref_present,
3058 &affected_refnames))
3059 BUG("initial ref transaction called with existing refs");
3060
3061 packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
3062 if (!packed_transaction) {
3063 ret = TRANSACTION_GENERIC_ERROR;
3064 goto cleanup;
3065 }
3066
3067 for (i = 0; i < transaction->nr; i++) {
3068 struct ref_update *update = transaction->updates[i];
3069
3070 if ((update->flags & REF_HAVE_OLD) &&
3071 !is_null_oid(&update->old_oid))
3072 BUG("initial ref transaction with old_sha1 set");
3073 if (refs_verify_refname_available(&refs->base, update->refname,
3074 &affected_refnames, NULL,
3075 err)) {
3076 ret = TRANSACTION_NAME_CONFLICT;
3077 goto cleanup;
3078 }
3079
3080 /*
3081 * Add a reference creation for this reference to the
3082 * packed-refs transaction:
3083 */
3084 ref_transaction_add_update(packed_transaction, update->refname,
3085 update->flags & ~REF_HAVE_OLD,
3086 &update->new_oid, &update->old_oid,
3087 NULL);
3088 }
3089
3090 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
3091 ret = TRANSACTION_GENERIC_ERROR;
3092 goto cleanup;
3093 }
3094
3095 if (initial_ref_transaction_commit(packed_transaction, err)) {
3096 ret = TRANSACTION_GENERIC_ERROR;
3097 }
3098
3099 packed_refs_unlock(refs->packed_ref_store);
3100 cleanup:
3101 if (packed_transaction)
3102 ref_transaction_free(packed_transaction);
3103 transaction->state = REF_TRANSACTION_CLOSED;
3104 string_list_clear(&affected_refnames, 0);
3105 return ret;
3106 }
3107
3108 struct expire_reflog_cb {
3109 reflog_expiry_should_prune_fn *should_prune_fn;
3110 void *policy_cb;
3111 FILE *newlog;
3112 struct object_id last_kept_oid;
3113 unsigned int rewrite:1,
3114 dry_run:1;
3115 };
3116
3117 static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
3118 const char *email, timestamp_t timestamp, int tz,
3119 const char *message, void *cb_data)
3120 {
3121 struct expire_reflog_cb *cb = cb_data;
3122 reflog_expiry_should_prune_fn *fn = cb->should_prune_fn;
3123
3124 if (cb->rewrite)
3125 ooid = &cb->last_kept_oid;
3126
3127 if (fn(ooid, noid, email, timestamp, tz, message, cb->policy_cb))
3128 return 0;
3129
3130 if (cb->dry_run)
3131 return 0; /* --dry-run */
3132
3133 fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s", oid_to_hex(ooid),
3134 oid_to_hex(noid), email, timestamp, tz, message);
3135 oidcpy(&cb->last_kept_oid, noid);
3136
3137 return 0;
3138 }
3139
3140 static int files_reflog_expire(struct ref_store *ref_store,
3141 const char *refname,
3142 unsigned int expire_flags,
3143 reflog_expiry_prepare_fn prepare_fn,
3144 reflog_expiry_should_prune_fn should_prune_fn,
3145 reflog_expiry_cleanup_fn cleanup_fn,
3146 void *policy_cb_data)
3147 {
3148 struct files_ref_store *refs =
3149 files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
3150 struct lock_file reflog_lock = LOCK_INIT;
3151 struct expire_reflog_cb cb;
3152 struct ref_lock *lock;
3153 struct strbuf log_file_sb = STRBUF_INIT;
3154 char *log_file;
3155 int status = 0;
3156 struct strbuf err = STRBUF_INIT;
3157 const struct object_id *oid;
3158
3159 memset(&cb, 0, sizeof(cb));
3160 cb.rewrite = !!(expire_flags & EXPIRE_REFLOGS_REWRITE);
3161 cb.dry_run = !!(expire_flags & EXPIRE_REFLOGS_DRY_RUN);
3162 cb.policy_cb = policy_cb_data;
3163 cb.should_prune_fn = should_prune_fn;
3164
3165 /*
3166 * The reflog file is locked by holding the lock on the
3167 * reference itself, plus we might need to update the
3168 * reference if --updateref was specified:
3169 */
3170 lock = lock_ref_oid_basic(refs, refname, &err);
3171 if (!lock) {
3172 error("cannot lock ref '%s': %s", refname, err.buf);
3173 strbuf_release(&err);
3174 return -1;
3175 }
3176 oid = &lock->old_oid;
3177
3178 /*
3179 * When refs are deleted, their reflog is deleted before the
3180 * ref itself is deleted. This is because there is no separate
3181 * lock for the reflog; instead we take a lock on the ref with
3182 * lock_ref_oid_basic().
3183 *
3184 * If a race happens and the reflog doesn't exist after we've
3185 * acquired the lock, that's OK. We've got nothing more to do;
3186 * we were asked to delete the reflog, but someone else
3187 * deleted it! The caller doesn't care who deleted it,
3188 * just that it is deleted. So we can return successfully.
3189 */
3190 if (!refs_reflog_exists(ref_store, refname)) {
3191 unlock_ref(lock);
3192 return 0;
3193 }
3194
3195 files_reflog_path(refs, &log_file_sb, refname);
3196 log_file = strbuf_detach(&log_file_sb, NULL);
3197 if (!cb.dry_run) {
3198 /*
3199 * Even though holding $GIT_DIR/logs/$reflog.lock has
3200 * no locking implications, we use the lock_file
3201 * machinery here anyway because it does a lot of the
3202 * work we need, including cleaning up if the program
3203 * exits unexpectedly.
3204 */
3205 if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) {
3206 struct strbuf err = STRBUF_INIT;
3207 unable_to_lock_message(log_file, errno, &err);
3208 error("%s", err.buf);
3209 strbuf_release(&err);
3210 goto failure;
3211 }
3212 cb.newlog = fdopen_lock_file(&reflog_lock, "w");
3213 if (!cb.newlog) {
3214 error("cannot fdopen %s (%s)",
3215 get_lock_file_path(&reflog_lock), strerror(errno));
3216 goto failure;
3217 }
3218 }
3219
3220 (*prepare_fn)(refname, oid, cb.policy_cb);
3221 refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
3222 (*cleanup_fn)(cb.policy_cb);
3223
3224 if (!cb.dry_run) {
3225 /*
3226 * It doesn't make sense to adjust a reference pointed
3227 * to by a symbolic ref based on expiring entries in
3228 * the symbolic reference's reflog. Nor can we update
3229 * a reference if there are no remaining reflog
3230 * entries.
3231 */
3232 int update = 0;
3233
3234 if ((expire_flags & EXPIRE_REFLOGS_UPDATE_REF) &&
3235 !is_null_oid(&cb.last_kept_oid)) {
3236 int type;
3237 const char *ref;
3238
3239 ref = refs_resolve_ref_unsafe(&refs->base, refname,
3240 RESOLVE_REF_NO_RECURSE,
3241 NULL, &type);
3242 update = !!(ref && !(type & REF_ISSYMREF));
3243 }
3244
3245 if (close_lock_file_gently(&reflog_lock)) {
3246 status |= error("couldn't write %s: %s", log_file,
3247 strerror(errno));
3248 rollback_lock_file(&reflog_lock);
3249 } else if (update &&
3250 (write_in_full(get_lock_file_fd(&lock->lk),
3251 oid_to_hex(&cb.last_kept_oid), the_hash_algo->hexsz) < 0 ||
3252 write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
3253 close_ref_gently(lock) < 0)) {
3254 status |= error("couldn't write %s",
3255 get_lock_file_path(&lock->lk));
3256 rollback_lock_file(&reflog_lock);
3257 } else if (commit_lock_file(&reflog_lock)) {
3258 status |= error("unable to write reflog '%s' (%s)",
3259 log_file, strerror(errno));
3260 } else if (update && commit_ref(lock)) {
3261 status |= error("couldn't set %s", lock->ref_name);
3262 }
3263 }
3264 free(log_file);
3265 unlock_ref(lock);
3266 return status;
3267
3268 failure:
3269 rollback_lock_file(&reflog_lock);
3270 free(log_file);
3271 unlock_ref(lock);
3272 return -1;
3273 }
3274
3275 static int files_init_db(struct ref_store *ref_store, struct strbuf *err UNUSED)
3276 {
3277 struct files_ref_store *refs =
3278 files_downcast(ref_store, REF_STORE_WRITE, "init_db");
3279 struct strbuf sb = STRBUF_INIT;
3280
3281 /*
3282 * Create .git/refs/{heads,tags}
3283 */
3284 files_ref_path(refs, &sb, "refs/heads");
3285 safe_create_dir(sb.buf, 1);
3286
3287 strbuf_reset(&sb);
3288 files_ref_path(refs, &sb, "refs/tags");
3289 safe_create_dir(sb.buf, 1);
3290
3291 strbuf_release(&sb);
3292 return 0;
3293 }
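/*
 * After files_init_db() a fresh repository therefore contains (using
 * the usual $GIT_DIR layout):
 *
 *   $GIT_DIR/refs/
 *   $GIT_DIR/refs/heads/
 *   $GIT_DIR/refs/tags/
 *
 * Loose refs written later by this backend live underneath these,
 * e.g. $GIT_DIR/refs/heads/main.
 */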
3294
3295 struct ref_storage_be refs_be_files = {
3296 .next = NULL,
3297 .name = "files",
3298 .init = files_ref_store_create,
3299 .init_db = files_init_db,
3300 .transaction_prepare = files_transaction_prepare,
3301 .transaction_finish = files_transaction_finish,
3302 .transaction_abort = files_transaction_abort,
3303 .initial_transaction_commit = files_initial_transaction_commit,
3304
3305 .pack_refs = files_pack_refs,
3306 .create_symref = files_create_symref,
3307 .delete_refs = files_delete_refs,
3308 .rename_ref = files_rename_ref,
3309 .copy_ref = files_copy_ref,
3310
3311 .iterator_begin = files_ref_iterator_begin,
3312 .read_raw_ref = files_read_raw_ref,
3313 .read_symbolic_ref = files_read_symbolic_ref,
3314
3315 .reflog_iterator_begin = files_reflog_iterator_begin,
3316 .for_each_reflog_ent = files_for_each_reflog_ent,
3317 .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse,
3318 .reflog_exists = files_reflog_exists,
3319 .create_reflog = files_create_reflog,
3320 .delete_reflog = files_delete_reflog,
3321 .reflog_expire = files_reflog_expire
3322 };
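/*
 * The generic refs code dispatches through this table rather than
 * calling the files_* functions directly.  The shape of such a
 * dispatch, reduced to a toy sketch (the struct and helper below are
 * invented for illustration and are not git's refs API):
 */
struct example_backend {
	const char *name;
	int (*init_db)(struct ref_store *refs, struct strbuf *err);
};

static int example_dispatch_init_db(const struct example_backend *be,
				    struct ref_store *refs,
				    struct strbuf *err)
{
	return be->init_db(refs, err);	/* late-bound call through the table */
}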