refs/files-backend.c
1 #include "../cache.h"
2 #include "../config.h"
3 #include "../refs.h"
4 #include "refs-internal.h"
5 #include "ref-cache.h"
6 #include "packed-backend.h"
7 #include "../iterator.h"
8 #include "../dir-iterator.h"
9 #include "../lockfile.h"
10 #include "../object.h"
11 #include "../dir.h"
12 #include "../chdir-notify.h"
13 #include "worktree.h"
14
15 /*
16 * This backend uses the following flags in `ref_update::flags` for
17 * internal bookkeeping purposes. Their numerical values must not
18 * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW,
19 * or REF_HAVE_OLD, which are also stored in `ref_update::flags`.
20 */
21
22 /*
23 * Used as a flag in ref_update::flags when a loose ref is being
24 * pruned. This flag must only be used when REF_NO_DEREF is set.
25 */
26 #define REF_IS_PRUNING (1 << 4)
27
28 /*
29 * Flag passed to lock_ref_sha1_basic() telling it to tolerate broken
30 * refs (i.e., because the reference is about to be deleted anyway).
31 */
32 #define REF_DELETING (1 << 5)
33
34 /*
35 * Used as a flag in ref_update::flags when the lockfile needs to be
36 * committed.
37 */
38 #define REF_NEEDS_COMMIT (1 << 6)
39
40 /*
41 * Used as a flag in ref_update::flags when the ref_update was via an
42 * update to HEAD.
43 */
44 #define REF_UPDATE_VIA_HEAD (1 << 8)
45
46 /*
47 * Used as a flag in ref_update::flags when a reference has been
48 * deleted and the ref's parent directories may need cleanup.
49 */
50 #define REF_DELETED_RMDIR (1 << 9)
51
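/*
 * A lock held on one loose reference: the refname, the open lockfile
 * guarding its loose ref file, and the value the reference had when
 * the lock was taken (the null OID if it was missing or broken).
 */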
52 struct ref_lock {
53 char *ref_name;
54 struct lock_file lk;
55 struct object_id old_oid;
56 };
57
58 struct files_ref_store {
59 struct ref_store base;
60 unsigned int store_flags;
61
62 char *gitcommondir;
63
64 struct ref_cache *loose;
65
66 struct ref_store *packed_ref_store;
67 };
68
69 static void clear_loose_ref_cache(struct files_ref_store *refs)
70 {
71 if (refs->loose) {
72 free_ref_cache(refs->loose);
73 refs->loose = NULL;
74 }
75 }
76
77 /*
78  * Create a new files ref store for the given repository, rooted at
79  * gitdir, together with its associated packed ref store.
80 */
81 static struct ref_store *files_ref_store_create(struct repository *repo,
82 const char *gitdir,
83 unsigned int flags)
84 {
85 struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
86 struct ref_store *ref_store = (struct ref_store *)refs;
87 struct strbuf sb = STRBUF_INIT;
88
89 base_ref_store_init(ref_store, repo, gitdir, &refs_be_files);
90 refs->store_flags = flags;
91 get_common_dir_noenv(&sb, gitdir);
92 refs->gitcommondir = strbuf_detach(&sb, NULL);
93 refs->packed_ref_store =
94 packed_ref_store_create(repo, refs->gitcommondir, flags);
95
96 chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir);
97 chdir_notify_reparent("files-backend $GIT_COMMONDIR",
98 &refs->gitcommondir);
99
100 return ref_store;
101 }
102
103 /*
104 * Die if refs is not the main ref store. caller is used in any
105 * necessary error messages.
106 */
107 static void files_assert_main_repository(struct files_ref_store *refs,
108 const char *caller)
109 {
110 if (refs->store_flags & REF_STORE_MAIN)
111 return;
112
113 BUG("operation %s only allowed for main ref store", caller);
114 }
115
116 /*
117 * Downcast ref_store to files_ref_store. Die if ref_store is not a
118 * files_ref_store. required_flags is compared with ref_store's
119 * store_flags to ensure the ref_store has all required capabilities.
120 * "caller" is used in any necessary error messages.
121 */
122 static struct files_ref_store *files_downcast(struct ref_store *ref_store,
123 unsigned int required_flags,
124 const char *caller)
125 {
126 struct files_ref_store *refs;
127
128 if (ref_store->be != &refs_be_files)
129 BUG("ref_store is type \"%s\" not \"files\" in %s",
130 ref_store->be->name, caller);
131
132 refs = (struct files_ref_store *)ref_store;
133
134 if ((refs->store_flags & required_flags) != required_flags)
135 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
136 caller, required_flags, refs->store_flags);
137
138 return refs;
139 }
140
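/*
 * Append to sb the reflog path of refname, which must name a ref in
 * another worktree ("worktrees/<name>/<ref>") or in the main worktree
 * ("main-worktree/<ref>"). The path is rooted at the common git dir.
 */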
141 static void files_reflog_path_other_worktrees(struct files_ref_store *refs,
142 struct strbuf *sb,
143 const char *refname)
144 {
145 const char *real_ref;
146 const char *worktree_name;
147 int length;
148
149 if (parse_worktree_ref(refname, &worktree_name, &length, &real_ref))
150                 BUG("refname %s is not an other-worktree ref", refname);
151
152 if (worktree_name)
153 strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir,
154 length, worktree_name, real_ref);
155 else
156 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir,
157 real_ref);
158 }
159
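/*
 * Append to sb the path of the reflog file for refname: per-worktree
 * refs and pseudorefs log under the worktree's gitdir, everything else
 * under the common git dir.
 */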
160 static void files_reflog_path(struct files_ref_store *refs,
161 struct strbuf *sb,
162 const char *refname)
163 {
164 switch (ref_type(refname)) {
165 case REF_TYPE_PER_WORKTREE:
166 case REF_TYPE_PSEUDOREF:
167 strbuf_addf(sb, "%s/logs/%s", refs->base.gitdir, refname);
168 break;
169 case REF_TYPE_OTHER_PSEUDOREF:
170 case REF_TYPE_MAIN_PSEUDOREF:
171 files_reflog_path_other_worktrees(refs, sb, refname);
172 break;
173 case REF_TYPE_NORMAL:
174 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname);
175 break;
176 default:
177 BUG("unknown ref type %d of ref %s",
178 ref_type(refname), refname);
179 }
180 }
181
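/*
 * Append to sb the path of the loose ref file for refname:
 * per-worktree refs and pseudorefs live under the worktree's gitdir,
 * shared refs under the common git dir (with the "main-worktree/"
 * prefix stripped for main-worktree pseudorefs).
 */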
182 static void files_ref_path(struct files_ref_store *refs,
183 struct strbuf *sb,
184 const char *refname)
185 {
186 switch (ref_type(refname)) {
187 case REF_TYPE_PER_WORKTREE:
188 case REF_TYPE_PSEUDOREF:
189 strbuf_addf(sb, "%s/%s", refs->base.gitdir, refname);
190 break;
191 case REF_TYPE_MAIN_PSEUDOREF:
192 if (!skip_prefix(refname, "main-worktree/", &refname))
193 BUG("ref %s is not a main pseudoref", refname);
194 /* fallthrough */
195 case REF_TYPE_OTHER_PSEUDOREF:
196 case REF_TYPE_NORMAL:
197 strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname);
198 break;
199 default:
200 BUG("unknown ref type %d of ref %s",
201 ref_type(refname), refname);
202 }
203 }
204
205 /*
206 * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being
207 * per-worktree, might not appear in the directory listing for
208 * refs/ in the main repo.
209 */
210 static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname)
211 {
212 const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" };
213 int ip;
214
215 if (strcmp(dirname, "refs/"))
216 return;
217
218 for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) {
219 const char *prefix = prefixes[ip];
220 int prefix_len = strlen(prefix);
221 struct ref_entry *child_entry;
222 int pos;
223
224 pos = search_ref_dir(dir, prefix, prefix_len);
225 if (pos >= 0)
226 continue;
227 child_entry = create_dir_entry(dir->cache, prefix, prefix_len);
228 add_entry_to_dir(dir, child_entry);
229 }
230 }
231
232 /*
233 * Read the loose references from the namespace dirname into dir
234 * (without recursing). dirname must end with '/'. dir must be the
235 * directory entry corresponding to dirname.
236 */
237 static void loose_fill_ref_dir(struct ref_store *ref_store,
238 struct ref_dir *dir, const char *dirname)
239 {
240 struct files_ref_store *refs =
241 files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir");
242 DIR *d;
243 struct dirent *de;
244 int dirnamelen = strlen(dirname);
245 struct strbuf refname;
246 struct strbuf path = STRBUF_INIT;
247 size_t path_baselen;
248
249 files_ref_path(refs, &path, dirname);
250 path_baselen = path.len;
251
252 d = opendir(path.buf);
253 if (!d) {
254 strbuf_release(&path);
255 return;
256 }
257
258 strbuf_init(&refname, dirnamelen + 257);
259 strbuf_add(&refname, dirname, dirnamelen);
260
261 while ((de = readdir(d)) != NULL) {
262 struct object_id oid;
263 struct stat st;
264 int flag;
265
266 if (de->d_name[0] == '.')
267 continue;
268 if (ends_with(de->d_name, ".lock"))
269 continue;
270 strbuf_addstr(&refname, de->d_name);
271 strbuf_addstr(&path, de->d_name);
272 if (stat(path.buf, &st) < 0) {
273 ; /* silently ignore */
274 } else if (S_ISDIR(st.st_mode)) {
275 strbuf_addch(&refname, '/');
276 add_entry_to_dir(dir,
277 create_dir_entry(dir->cache, refname.buf,
278 refname.len));
279 } else {
280 if (!refs_resolve_ref_unsafe(&refs->base,
281 refname.buf,
282 RESOLVE_REF_READING,
283 &oid, &flag)) {
284 oidclr(&oid);
285 flag |= REF_ISBROKEN;
286 } else if (is_null_oid(&oid)) {
287 /*
288 * It is so astronomically unlikely
289 * that null_oid is the OID of an
290 * actual object that we consider its
291 * appearance in a loose reference
292 * file to be repo corruption
293 * (probably due to a software bug).
294 */
295 flag |= REF_ISBROKEN;
296 }
297
298 if (check_refname_format(refname.buf,
299 REFNAME_ALLOW_ONELEVEL)) {
300 if (!refname_is_safe(refname.buf))
301 die("loose refname is dangerous: %s", refname.buf);
302 oidclr(&oid);
303 flag |= REF_BAD_NAME | REF_ISBROKEN;
304 }
305 add_entry_to_dir(dir,
306 create_ref_entry(refname.buf, &oid, flag));
307 }
308 strbuf_setlen(&refname, dirnamelen);
309 strbuf_setlen(&path, path_baselen);
310 }
311 strbuf_release(&refname);
312 strbuf_release(&path);
313 closedir(d);
314
315 add_per_worktree_entries_to_dir(dir, dirname);
316 }
317
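/*
 * Return the loose ref cache for refs, creating it on first use and
 * seeding it with an incomplete "refs/" entry so that subdirectories
 * are read lazily via loose_fill_ref_dir().
 */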
318 static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
319 {
320 if (!refs->loose) {
321 /*
322 * Mark the top-level directory complete because we
323 * are about to read the only subdirectory that can
324 * hold references:
325 */
326 refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir);
327
328 /* We're going to fill the top level ourselves: */
329 refs->loose->root->flag &= ~REF_INCOMPLETE;
330
331 /*
332 * Add an incomplete entry for "refs/" (to be filled
333 * lazily):
334 */
335 add_entry_to_dir(get_ref_dir(refs->loose->root),
336 create_dir_entry(refs->loose, "refs/", 5));
337 }
338 return refs->loose;
339 }
340
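/*
 * Read refname from the loose ref store, falling back to the packed
 * ref store when no loose file exists (unless skip_packed_refs is
 * set). On success fill in oid (or referent and REF_ISSYMREF in *type
 * for a symref) and return 0; otherwise set *failure_errno and return
 * non-zero.
 */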
341 static int read_ref_internal(struct ref_store *ref_store, const char *refname,
342 struct object_id *oid, struct strbuf *referent,
343 unsigned int *type, int *failure_errno, int skip_packed_refs)
344 {
345 struct files_ref_store *refs =
346 files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
347 struct strbuf sb_contents = STRBUF_INIT;
348 struct strbuf sb_path = STRBUF_INIT;
349 const char *path;
350 const char *buf;
351 struct stat st;
352 int fd;
353 int ret = -1;
354 int remaining_retries = 3;
355 int myerr = 0;
356
357 *type = 0;
358 strbuf_reset(&sb_path);
359
360 files_ref_path(refs, &sb_path, refname);
361
362 path = sb_path.buf;
363
364 stat_ref:
365 /*
366 * We might have to loop back here to avoid a race
367 * condition: first we lstat() the file, then we try
368 * to read it as a link or as a file. But if somebody
369 * changes the type of the file (file <-> directory
370 * <-> symlink) between the lstat() and reading, then
371 * we don't want to report that as an error but rather
372 * try again starting with the lstat().
373 *
374 * We'll keep a count of the retries, though, just to avoid
375 * any confusing situation sending us into an infinite loop.
376 */
377
378 if (remaining_retries-- <= 0)
379 goto out;
380
381 if (lstat(path, &st) < 0) {
382 int ignore_errno;
383 myerr = errno;
384 if (myerr != ENOENT || skip_packed_refs)
385 goto out;
386 if (refs_read_raw_ref(refs->packed_ref_store, refname, oid,
387 referent, type, &ignore_errno)) {
388 myerr = ENOENT;
389 goto out;
390 }
391 ret = 0;
392 goto out;
393 }
394
395         /* Follow "normalized" symlinks (i.e. those whose target is "refs/...") by hand */
396 if (S_ISLNK(st.st_mode)) {
397 strbuf_reset(&sb_contents);
398 if (strbuf_readlink(&sb_contents, path, st.st_size) < 0) {
399 myerr = errno;
400 if (myerr == ENOENT || myerr == EINVAL)
401 /* inconsistent with lstat; retry */
402 goto stat_ref;
403 else
404 goto out;
405 }
406 if (starts_with(sb_contents.buf, "refs/") &&
407 !check_refname_format(sb_contents.buf, 0)) {
408 strbuf_swap(&sb_contents, referent);
409 *type |= REF_ISSYMREF;
410 ret = 0;
411 goto out;
412 }
413 /*
414 * It doesn't look like a refname; fall through to just
415 * treating it like a non-symlink, and reading whatever it
416 * points to.
417 */
418 }
419
420 /* Is it a directory? */
421 if (S_ISDIR(st.st_mode)) {
422 int ignore_errno;
423 /*
424 * Even though there is a directory where the loose
425 * ref is supposed to be, there could still be a
426 * packed ref:
427 */
428 if (skip_packed_refs ||
429 refs_read_raw_ref(refs->packed_ref_store, refname, oid,
430 referent, type, &ignore_errno)) {
431 myerr = EISDIR;
432 goto out;
433 }
434 ret = 0;
435 goto out;
436 }
437
438 /*
439 * Anything else, just open it and try to use it as
440 * a ref
441 */
442 fd = open(path, O_RDONLY);
443 if (fd < 0) {
444 myerr = errno;
445 if (myerr == ENOENT && !S_ISLNK(st.st_mode))
446 /* inconsistent with lstat; retry */
447 goto stat_ref;
448 else
449 goto out;
450 }
451 strbuf_reset(&sb_contents);
452 if (strbuf_read(&sb_contents, fd, 256) < 0) {
453 myerr = errno;
454 close(fd);
455 goto out;
456 }
457 close(fd);
458 strbuf_rtrim(&sb_contents);
459 buf = sb_contents.buf;
460
461 ret = parse_loose_ref_contents(buf, oid, referent, type, &myerr);
462
463 out:
464 if (ret && !myerr)
465 BUG("returning non-zero %d, should have set myerr!", ret);
466 *failure_errno = myerr;
467
468 strbuf_release(&sb_path);
469 strbuf_release(&sb_contents);
470 errno = 0;
471 return ret;
472 }
473
474 static int files_read_raw_ref(struct ref_store *ref_store, const char *refname,
475 struct object_id *oid, struct strbuf *referent,
476 unsigned int *type, int *failure_errno)
477 {
478 return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0);
479 }
480
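/*
 * Read refname from the loose ref store only (packed refs cannot hold
 * symrefs). Return 0 and fill in referent if it is a symbolic ref,
 * non-zero otherwise.
 */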
481 static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
482 struct strbuf *referent)
483 {
484 struct object_id oid;
485 int failure_errno, ret;
486 unsigned int type;
487
488 ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1);
489 if (ret)
490 return ret;
491
492 return !(type & REF_ISSYMREF);
493 }
494
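/*
 * Parse the contents of a loose ref file, which is either a symref
 * line of the form
 *
 *     ref: refs/heads/main
 *
 * or an object name in hex (possibly followed by extra data, as in
 * FETCH_HEAD). Fill in referent or oid accordingly, set REF_ISSYMREF
 * or REF_ISBROKEN in *type, and return 0 on success or -1 (with
 * *failure_errno set to EINVAL) for a malformed entry.
 */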
495 int parse_loose_ref_contents(const char *buf, struct object_id *oid,
496 struct strbuf *referent, unsigned int *type,
497 int *failure_errno)
498 {
499 const char *p;
500 if (skip_prefix(buf, "ref:", &buf)) {
501 while (isspace(*buf))
502 buf++;
503
504 strbuf_reset(referent);
505 strbuf_addstr(referent, buf);
506 *type |= REF_ISSYMREF;
507 return 0;
508 }
509
510 /*
511 * FETCH_HEAD has additional data after the sha.
512 */
513 if (parse_oid_hex(buf, oid, &p) ||
514 (*p != '\0' && !isspace(*p))) {
515 *type |= REF_ISBROKEN;
516 *failure_errno = EINVAL;
517 return -1;
518 }
519 return 0;
520 }
521
522 static void unlock_ref(struct ref_lock *lock)
523 {
524 rollback_lock_file(&lock->lk);
525 free(lock->ref_name);
526 free(lock);
527 }
528
529 /*
530 * Lock refname, without following symrefs, and set *lock_p to point
531 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
532 * and type similarly to read_raw_ref().
533 *
534 * The caller must verify that refname is a "safe" reference name (in
535 * the sense of refname_is_safe()) before calling this function.
536 *
537 * If the reference doesn't already exist, verify that refname doesn't
538 * have a D/F conflict with any existing references. extras and skip
539 * are passed to refs_verify_refname_available() for this check.
540 *
541 * If mustexist is not set and the reference is not found or is
542 * broken, lock the reference anyway but clear old_oid.
543 *
544 * Return 0 on success. On failure, write an error message to err and
545 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
546 *
547 * Implementation note: This function is basically
548 *
549 * lock reference
550 * read_raw_ref()
551 *
552 * but it includes a lot more code to
553 * - Deal with possible races with other processes
554 * - Avoid calling refs_verify_refname_available() when it can be
555 * avoided, namely if we were successfully able to read the ref
556 * - Generate informative error messages in the case of failure
557 */
558 static int lock_raw_ref(struct files_ref_store *refs,
559 const char *refname, int mustexist,
560 const struct string_list *extras,
561 struct ref_lock **lock_p,
562 struct strbuf *referent,
563 unsigned int *type,
564 struct strbuf *err)
565 {
566 struct ref_lock *lock;
567 struct strbuf ref_file = STRBUF_INIT;
568 int attempts_remaining = 3;
569 int ret = TRANSACTION_GENERIC_ERROR;
570 int failure_errno;
571
572 assert(err);
573 files_assert_main_repository(refs, "lock_raw_ref");
574
575 *type = 0;
576
577 /* First lock the file so it can't change out from under us. */
578
579 *lock_p = CALLOC_ARRAY(lock, 1);
580
581 lock->ref_name = xstrdup(refname);
582 files_ref_path(refs, &ref_file, refname);
583
584 retry:
585 switch (safe_create_leading_directories(ref_file.buf)) {
586 case SCLD_OK:
587 break; /* success */
588 case SCLD_EXISTS:
589 /*
590 * Suppose refname is "refs/foo/bar". We just failed
591 * to create the containing directory, "refs/foo",
592 * because there was a non-directory in the way. This
593 * indicates a D/F conflict, probably because of
594 * another reference such as "refs/foo". There is no
595 * reason to expect this error to be transitory.
596 */
597 if (refs_verify_refname_available(&refs->base, refname,
598 extras, NULL, err)) {
599 if (mustexist) {
600 /*
601 * To the user the relevant error is
602 * that the "mustexist" reference is
603 * missing:
604 */
605 strbuf_reset(err);
606 strbuf_addf(err, "unable to resolve reference '%s'",
607 refname);
608 } else {
609 /*
610 * The error message set by
611 * refs_verify_refname_available() is
612 * OK.
613 */
614 ret = TRANSACTION_NAME_CONFLICT;
615 }
616 } else {
617 /*
618 * The file that is in the way isn't a loose
619 * reference. Report it as a low-level
620 * failure.
621 */
622 strbuf_addf(err, "unable to create lock file %s.lock; "
623 "non-directory in the way",
624 ref_file.buf);
625 }
626 goto error_return;
627 case SCLD_VANISHED:
628 /* Maybe another process was tidying up. Try again. */
629 if (--attempts_remaining > 0)
630 goto retry;
631 /* fall through */
632 default:
633 strbuf_addf(err, "unable to create directory for %s",
634 ref_file.buf);
635 goto error_return;
636 }
637
638 if (hold_lock_file_for_update_timeout(
639 &lock->lk, ref_file.buf, LOCK_NO_DEREF,
640 get_files_ref_lock_timeout_ms()) < 0) {
641 int myerr = errno;
642 errno = 0;
643 if (myerr == ENOENT && --attempts_remaining > 0) {
644 /*
645 * Maybe somebody just deleted one of the
646 * directories leading to ref_file. Try
647 * again:
648 */
649 goto retry;
650 } else {
651 unable_to_lock_message(ref_file.buf, myerr, err);
652 goto error_return;
653 }
654 }
655
656 /*
657 * Now we hold the lock and can read the reference without
658 * fear that its value will change.
659 */
660
661 if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent,
662 type, &failure_errno)) {
663 if (failure_errno == ENOENT) {
664 if (mustexist) {
665 /* Garden variety missing reference. */
666 strbuf_addf(err, "unable to resolve reference '%s'",
667 refname);
668 goto error_return;
669 } else {
670 /*
671 * Reference is missing, but that's OK. We
672 * know that there is not a conflict with
673 * another loose reference because
674 * (supposing that we are trying to lock
675 * reference "refs/foo/bar"):
676 *
677 * - We were successfully able to create
678 * the lockfile refs/foo/bar.lock, so we
679 * know there cannot be a loose reference
680 * named "refs/foo".
681 *
682 * - We got ENOENT and not EISDIR, so we
683 * know that there cannot be a loose
684 * reference named "refs/foo/bar/baz".
685 */
686 }
687 } else if (failure_errno == EISDIR) {
688 /*
689 * There is a directory in the way. It might have
690 * contained references that have been deleted. If
691 * we don't require that the reference already
692 * exists, try to remove the directory so that it
693 * doesn't cause trouble when we want to rename the
694 * lockfile into place later.
695 */
696 if (mustexist) {
697 /* Garden variety missing reference. */
698 strbuf_addf(err, "unable to resolve reference '%s'",
699 refname);
700 goto error_return;
701 } else if (remove_dir_recursively(&ref_file,
702 REMOVE_DIR_EMPTY_ONLY)) {
703 if (refs_verify_refname_available(
704 &refs->base, refname,
705 extras, NULL, err)) {
706 /*
707 * The error message set by
708 * verify_refname_available() is OK.
709 */
710 ret = TRANSACTION_NAME_CONFLICT;
711 goto error_return;
712 } else {
713 /*
714 * We can't delete the directory,
715 * but we also don't know of any
716 * references that it should
717 * contain.
718 */
719 strbuf_addf(err, "there is a non-empty directory '%s' "
720 "blocking reference '%s'",
721 ref_file.buf, refname);
722 goto error_return;
723 }
724 }
725 } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) {
726 strbuf_addf(err, "unable to resolve reference '%s': "
727 "reference broken", refname);
728 goto error_return;
729 } else {
730 strbuf_addf(err, "unable to resolve reference '%s': %s",
731 refname, strerror(failure_errno));
732 goto error_return;
733 }
734
735 /*
736 * If the ref did not exist and we are creating it,
737 * make sure there is no existing packed ref that
738 * conflicts with refname:
739 */
740 if (refs_verify_refname_available(
741 refs->packed_ref_store, refname,
742 extras, NULL, err))
743 goto error_return;
744 }
745
746 ret = 0;
747 goto out;
748
749 error_return:
750 unlock_ref(lock);
751 *lock_p = NULL;
752
753 out:
754 strbuf_release(&ref_file);
755 return ret;
756 }
757
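/*
 * An iterator over the files backend: iter0 is an overlay of the loose
 * and packed ref iterators, filtered according to flags in
 * files_ref_iterator_advance().
 */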
758 struct files_ref_iterator {
759 struct ref_iterator base;
760
761 struct ref_iterator *iter0;
762 struct repository *repo;
763 unsigned int flags;
764 };
765
766 static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
767 {
768 struct files_ref_iterator *iter =
769 (struct files_ref_iterator *)ref_iterator;
770 int ok;
771
772 while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
773 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
774 ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
775 continue;
776
777 if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) &&
778 (iter->iter0->flags & REF_ISSYMREF) &&
779 (iter->iter0->flags & REF_ISBROKEN))
780 continue;
781
782 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
783 !ref_resolves_to_object(iter->iter0->refname,
784 iter->repo,
785 iter->iter0->oid,
786 iter->iter0->flags))
787 continue;
788
789 iter->base.refname = iter->iter0->refname;
790 iter->base.oid = iter->iter0->oid;
791 iter->base.flags = iter->iter0->flags;
792 return ITER_OK;
793 }
794
795 iter->iter0 = NULL;
796 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
797 ok = ITER_ERROR;
798
799 return ok;
800 }
801
802 static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
803 struct object_id *peeled)
804 {
805 struct files_ref_iterator *iter =
806 (struct files_ref_iterator *)ref_iterator;
807
808 return ref_iterator_peel(iter->iter0, peeled);
809 }
810
811 static int files_ref_iterator_abort(struct ref_iterator *ref_iterator)
812 {
813 struct files_ref_iterator *iter =
814 (struct files_ref_iterator *)ref_iterator;
815 int ok = ITER_DONE;
816
817 if (iter->iter0)
818 ok = ref_iterator_abort(iter->iter0);
819
820 base_ref_iterator_free(ref_iterator);
821 return ok;
822 }
823
824 static struct ref_iterator_vtable files_ref_iterator_vtable = {
825 .advance = files_ref_iterator_advance,
826 .peel = files_ref_iterator_peel,
827 .abort = files_ref_iterator_abort,
828 };
829
830 static struct ref_iterator *files_ref_iterator_begin(
831 struct ref_store *ref_store,
832 const char *prefix, unsigned int flags)
833 {
834 struct files_ref_store *refs;
835 struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
836 struct files_ref_iterator *iter;
837 struct ref_iterator *ref_iterator;
838 unsigned int required_flags = REF_STORE_READ;
839
840 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
841 required_flags |= REF_STORE_ODB;
842
843 refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
844
845 /*
846 * We must make sure that all loose refs are read before
847 * accessing the packed-refs file; this avoids a race
848 * condition if loose refs are migrated to the packed-refs
849 * file by a simultaneous process, but our in-memory view is
850 * from before the migration. We ensure this as follows:
851 * First, we call start the loose refs iteration with its
852 * `prime_ref` argument set to true. This causes the loose
853 * references in the subtree to be pre-read into the cache.
854 * (If they've already been read, that's OK; we only need to
855 * guarantee that they're read before the packed refs, not
856 * *how much* before.) After that, we call
857 * packed_ref_iterator_begin(), which internally checks
858 * whether the packed-ref cache is up to date with what is on
859 * disk, and re-reads it if not.
860 */
861
862 loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
863 prefix, ref_store->repo, 1);
864
865 /*
866 * The packed-refs file might contain broken references, for
867 * example an old version of a reference that points at an
868 * object that has since been garbage-collected. This is OK as
869 * long as there is a corresponding loose reference that
870 * overrides it, and we don't want to emit an error message in
871 * this case. So ask the packed_ref_store for all of its
872 * references, and (if needed) do our own check for broken
873 * ones in files_ref_iterator_advance(), after we have merged
874 * the packed and loose references.
875 */
876 packed_iter = refs_ref_iterator_begin(
877 refs->packed_ref_store, prefix, 0,
878 DO_FOR_EACH_INCLUDE_BROKEN);
879
880 overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
881
882 CALLOC_ARRAY(iter, 1);
883 ref_iterator = &iter->base;
884 base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
885 overlay_iter->ordered);
886 iter->iter0 = overlay_iter;
887 iter->repo = ref_store->repo;
888 iter->flags = flags;
889
890 return ref_iterator;
891 }
892
893 /*
894 * Callback function for raceproof_create_file(). This function is
895 * expected to do something that makes dirname(path) permanent despite
896 * the fact that other processes might be cleaning up empty
897 * directories at the same time. Usually it will create a file named
898 * path, but alternatively it could create another file in that
899 * directory, or even chdir() into that directory. The function should
900 * return 0 if the action was completed successfully. On error, it
901 * should return a nonzero result and set errno.
902 * raceproof_create_file() treats two errno values specially:
903 *
904 * - ENOENT -- dirname(path) does not exist. In this case,
905 * raceproof_create_file() tries creating dirname(path)
906 * (and any parent directories, if necessary) and calls
907 * the function again.
908 *
909 * - EISDIR -- the file already exists and is a directory. In this
910 * case, raceproof_create_file() removes the directory if
911 * it is empty (and recursively any empty directories that
912 * it contains) and calls the function again.
913 *
914 * Any other errno causes raceproof_create_file() to fail with the
915 * callback's return value and errno.
916 *
917 * Obviously, this function should be OK with being called again if it
918 * fails with ENOENT or EISDIR. In other scenarios it will not be
919 * called again.
920 */
921 typedef int create_file_fn(const char *path, void *cb);
922
923 /*
924 * Create a file in dirname(path) by calling fn, creating leading
925 * directories if necessary. Retry a few times in case we are racing
926 * with another process that is trying to clean up the directory that
927 * contains path. See the documentation for create_file_fn for more
928 * details.
929 *
930 * Return the value and set the errno that resulted from the most
931 * recent call of fn. fn is always called at least once, and will be
932 * called more than once if it returns ENOENT or EISDIR.
933 */
934 static int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
935 {
936 /*
937 * The number of times we will try to remove empty directories
938 * in the way of path. This is only 1 because if another
939 * process is racily creating directories that conflict with
940 * us, we don't want to fight against them.
941 */
942 int remove_directories_remaining = 1;
943
944 /*
945 * The number of times that we will try to create the
946 * directories containing path. We are willing to attempt this
947 * more than once, because another process could be trying to
948 * clean up empty directories at the same time as we are
949 * trying to create them.
950 */
951 int create_directories_remaining = 3;
952
953 /* A scratch copy of path, filled lazily if we need it: */
954 struct strbuf path_copy = STRBUF_INIT;
955
956 int ret, save_errno;
957
958 /* Sanity check: */
959 assert(*path);
960
961 retry_fn:
962 ret = fn(path, cb);
963 save_errno = errno;
964 if (!ret)
965 goto out;
966
967 if (errno == EISDIR && remove_directories_remaining-- > 0) {
968 /*
969 * A directory is in the way. Maybe it is empty; try
970 * to remove it:
971 */
972 if (!path_copy.len)
973 strbuf_addstr(&path_copy, path);
974
975 if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
976 goto retry_fn;
977 } else if (errno == ENOENT && create_directories_remaining-- > 0) {
978 /*
979 * Maybe the containing directory didn't exist, or
980 * maybe it was just deleted by a process that is
981 * racing with us to clean up empty directories. Try
982 * to create it:
983 */
984 enum scld_error scld_result;
985
986 if (!path_copy.len)
987 strbuf_addstr(&path_copy, path);
988
989 do {
990 scld_result = safe_create_leading_directories(path_copy.buf);
991 if (scld_result == SCLD_OK)
992 goto retry_fn;
993 } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
994 }
995
996 out:
997 strbuf_release(&path_copy);
998 errno = save_errno;
999 return ret;
1000 }
1001
1002 static int remove_empty_directories(struct strbuf *path)
1003 {
1004 /*
1005 * we want to create a file but there is a directory there;
1006 * if that is an empty directory (or a directory that contains
1007 * only empty directories), remove them.
1008 */
1009 return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);
1010 }
1011
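/*
 * A create_file_fn callback that takes the lock on path; used with
 * raceproof_create_file() by lock_ref_oid_basic().
 */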
1012 static int create_reflock(const char *path, void *cb)
1013 {
1014 struct lock_file *lk = cb;
1015
1016 return hold_lock_file_for_update_timeout(
1017 lk, path, LOCK_NO_DEREF,
1018 get_files_ref_lock_timeout_ms()) < 0 ? -1 : 0;
1019 }
1020
1021 /*
1022  * Lock a ref, returning the lock on success and NULL on failure.
1023 */
1024 static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
1025 const char *refname,
1026 struct strbuf *err)
1027 {
1028 struct strbuf ref_file = STRBUF_INIT;
1029 struct ref_lock *lock;
1030
1031 files_assert_main_repository(refs, "lock_ref_oid_basic");
1032 assert(err);
1033
1034 CALLOC_ARRAY(lock, 1);
1035
1036 files_ref_path(refs, &ref_file, refname);
1037
1038 /*
1039 * If the ref did not exist and we are creating it, make sure
1040 * there is no existing packed ref whose name begins with our
1041 * refname, nor a packed ref whose name is a proper prefix of
1042 * our refname.
1043 */
1044 if (is_null_oid(&lock->old_oid) &&
1045 refs_verify_refname_available(refs->packed_ref_store, refname,
1046 NULL, NULL, err))
1047 goto error_return;
1048
1049 lock->ref_name = xstrdup(refname);
1050
1051 if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
1052 unable_to_lock_message(ref_file.buf, errno, err);
1053 goto error_return;
1054 }
1055
1056 if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0,
1057 &lock->old_oid, NULL))
1058 oidclr(&lock->old_oid);
1059 goto out;
1060
1061 error_return:
1062 unlock_ref(lock);
1063 lock = NULL;
1064
1065 out:
1066 strbuf_release(&ref_file);
1067 return lock;
1068 }
1069
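/*
 * A singly-linked list node recording a loose ref (name and value)
 * that should be pruned after it has been packed.
 */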
1070 struct ref_to_prune {
1071 struct ref_to_prune *next;
1072 struct object_id oid;
1073 char name[FLEX_ARRAY];
1074 };
1075
1076 enum {
1077 REMOVE_EMPTY_PARENTS_REF = 0x01,
1078 REMOVE_EMPTY_PARENTS_REFLOG = 0x02
1079 };
1080
1081 /*
1082 * Remove empty parent directories associated with the specified
1083 * reference and/or its reflog, but spare [logs/]refs/ and immediate
1084 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
1085 * REMOVE_EMPTY_PARENTS_REFLOG.
1086 */
1087 static void try_remove_empty_parents(struct files_ref_store *refs,
1088 const char *refname,
1089 unsigned int flags)
1090 {
1091 struct strbuf buf = STRBUF_INIT;
1092 struct strbuf sb = STRBUF_INIT;
1093 char *p, *q;
1094 int i;
1095
1096 strbuf_addstr(&buf, refname);
1097 p = buf.buf;
1098 for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
1099 while (*p && *p != '/')
1100 p++;
1101 /* tolerate duplicate slashes; see check_refname_format() */
1102 while (*p == '/')
1103 p++;
1104 }
1105 q = buf.buf + buf.len;
1106 while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
1107 while (q > p && *q != '/')
1108 q--;
1109 while (q > p && *(q-1) == '/')
1110 q--;
1111 if (q == p)
1112 break;
1113 strbuf_setlen(&buf, q - buf.buf);
1114
1115 strbuf_reset(&sb);
1116 files_ref_path(refs, &sb, buf.buf);
1117 if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
1118 flags &= ~REMOVE_EMPTY_PARENTS_REF;
1119
1120 strbuf_reset(&sb);
1121 files_reflog_path(refs, &sb, buf.buf);
1122 if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
1123 flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
1124 }
1125 strbuf_release(&buf);
1126 strbuf_release(&sb);
1127 }
1128
1129 /* make sure nobody touched the ref, and unlink */
1130 static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
1131 {
1132 struct ref_transaction *transaction;
1133 struct strbuf err = STRBUF_INIT;
1134 int ret = -1;
1135
1136 if (check_refname_format(r->name, 0))
1137 return;
1138
1139 transaction = ref_store_transaction_begin(&refs->base, &err);
1140 if (!transaction)
1141 goto cleanup;
1142 ref_transaction_add_update(
1143 transaction, r->name,
1144 REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
1145 null_oid(), &r->oid, NULL);
1146 if (ref_transaction_commit(transaction, &err))
1147 goto cleanup;
1148
1149 ret = 0;
1150
1151 cleanup:
1152 if (ret)
1153 error("%s", err.buf);
1154 strbuf_release(&err);
1155 ref_transaction_free(transaction);
1156 return;
1157 }
1158
1159 /*
1160 * Prune the loose versions of the references in the linked list
1161 * `*refs_to_prune`, freeing the entries in the list as we go.
1162 */
1163 static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
1164 {
1165 while (*refs_to_prune) {
1166 struct ref_to_prune *r = *refs_to_prune;
1167 *refs_to_prune = r->next;
1168 prune_ref(refs, r);
1169 free(r);
1170 }
1171 }
1172
1173 /*
1174 * Return true if the specified reference should be packed.
1175 */
1176 static int should_pack_ref(const char *refname,
1177 const struct object_id *oid, unsigned int ref_flags,
1178 unsigned int pack_flags)
1179 {
1180 /* Do not pack per-worktree refs: */
1181 if (ref_type(refname) != REF_TYPE_NORMAL)
1182 return 0;
1183
1184 /* Do not pack non-tags unless PACK_REFS_ALL is set: */
1185 if (!(pack_flags & PACK_REFS_ALL) && !starts_with(refname, "refs/tags/"))
1186 return 0;
1187
1188 /* Do not pack symbolic refs: */
1189 if (ref_flags & REF_ISSYMREF)
1190 return 0;
1191
1192 /* Do not pack broken refs: */
1193 if (!ref_resolves_to_object(refname, the_repository, oid, ref_flags))
1194 return 0;
1195
1196 return 1;
1197 }
1198
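/*
 * Pack all eligible loose refs into the packed-refs file in a single
 * packed-refs transaction, and optionally (PACK_REFS_PRUNE) delete the
 * loose copies afterwards.
 */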
1199 static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
1200 {
1201 struct files_ref_store *refs =
1202 files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
1203 "pack_refs");
1204 struct ref_iterator *iter;
1205 int ok;
1206 struct ref_to_prune *refs_to_prune = NULL;
1207 struct strbuf err = STRBUF_INIT;
1208 struct ref_transaction *transaction;
1209
1210 transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
1211 if (!transaction)
1212 return -1;
1213
1214 packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
1215
1216 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs), NULL,
1217 the_repository, 0);
1218 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
1219 /*
1220 * If the loose reference can be packed, add an entry
1221 * in the packed ref cache. If the reference should be
1222 * pruned, also add it to refs_to_prune.
1223 */
1224 if (!should_pack_ref(iter->refname, iter->oid, iter->flags,
1225 flags))
1226 continue;
1227
1228 /*
1229 * Add a reference creation for this reference to the
1230 * packed-refs transaction:
1231 */
1232 if (ref_transaction_update(transaction, iter->refname,
1233 iter->oid, NULL,
1234 REF_NO_DEREF, NULL, &err))
1235 die("failure preparing to create packed reference %s: %s",
1236 iter->refname, err.buf);
1237
1238 /* Schedule the loose reference for pruning if requested. */
1239 if ((flags & PACK_REFS_PRUNE)) {
1240 struct ref_to_prune *n;
1241 FLEX_ALLOC_STR(n, name, iter->refname);
1242 oidcpy(&n->oid, iter->oid);
1243 n->next = refs_to_prune;
1244 refs_to_prune = n;
1245 }
1246 }
1247 if (ok != ITER_DONE)
1248 die("error while iterating over references");
1249
1250 if (ref_transaction_commit(transaction, &err))
1251 die("unable to write new packed-refs: %s", err.buf);
1252
1253 ref_transaction_free(transaction);
1254
1255 packed_refs_unlock(refs->packed_ref_store);
1256
1257 prune_refs(refs, &refs_to_prune);
1258 strbuf_release(&err);
1259 return 0;
1260 }
1261
1262 static int files_delete_refs(struct ref_store *ref_store, const char *msg,
1263 struct string_list *refnames, unsigned int flags)
1264 {
1265 struct files_ref_store *refs =
1266 files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1267 struct strbuf err = STRBUF_INIT;
1268 int i, result = 0;
1269
1270 if (!refnames->nr)
1271 return 0;
1272
1273 if (packed_refs_lock(refs->packed_ref_store, 0, &err))
1274 goto error;
1275
1276 if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) {
1277 packed_refs_unlock(refs->packed_ref_store);
1278 goto error;
1279 }
1280
1281 packed_refs_unlock(refs->packed_ref_store);
1282
1283 for (i = 0; i < refnames->nr; i++) {
1284 const char *refname = refnames->items[i].string;
1285
1286 if (refs_delete_ref(&refs->base, msg, refname, NULL, flags))
1287 result |= error(_("could not remove reference %s"), refname);
1288 }
1289
1290 strbuf_release(&err);
1291 return result;
1292
1293 error:
1294 /*
1295 * If we failed to rewrite the packed-refs file, then it is
1296 * unsafe to try to remove loose refs, because doing so might
1297 * expose an obsolete packed value for a reference that might
1298 * even point at an object that has been garbage collected.
1299 */
1300 if (refnames->nr == 1)
1301 error(_("could not delete reference %s: %s"),
1302 refnames->items[0].string, err.buf);
1303 else
1304 error(_("could not delete references: %s"), err.buf);
1305
1306 strbuf_release(&err);
1307 return -1;
1308 }
1309
1310 /*
1311 * People using contrib's git-new-workdir have .git/logs/refs ->
1312 * /some/other/path/.git/logs/refs, and that may live on another device.
1313 *
1314 * IOW, to avoid cross device rename errors, the temporary renamed log must
1315  * itself live under logs/refs.
1316 */
1317 #define TMP_RENAMED_LOG "refs/.tmp-renamed-log"
1318
1319 struct rename_cb {
1320 const char *tmp_renamed_log;
1321 int true_errno;
1322 };
1323
1324 static int rename_tmp_log_callback(const char *path, void *cb_data)
1325 {
1326 struct rename_cb *cb = cb_data;
1327
1328 if (rename(cb->tmp_renamed_log, path)) {
1329 /*
1330 * rename(a, b) when b is an existing directory ought
1331                  * to result in EISDIR, but Solaris 5.8 gives ENOTDIR.
1332 * Sheesh. Record the true errno for error reporting,
1333 * but report EISDIR to raceproof_create_file() so
1334 * that it knows to retry.
1335 */
1336 cb->true_errno = errno;
1337 if (errno == ENOTDIR)
1338 errno = EISDIR;
1339 return -1;
1340 } else {
1341 return 0;
1342 }
1343 }
1344
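/*
 * Move the temporary reflog (TMP_RENAMED_LOG) into place as the reflog
 * of newrefname, creating any missing parent directories and retrying
 * around racy directory cleanup via raceproof_create_file().
 */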
1345 static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
1346 {
1347 struct strbuf path = STRBUF_INIT;
1348 struct strbuf tmp = STRBUF_INIT;
1349 struct rename_cb cb;
1350 int ret;
1351
1352 files_reflog_path(refs, &path, newrefname);
1353 files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
1354 cb.tmp_renamed_log = tmp.buf;
1355 ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
1356 if (ret) {
1357 if (errno == EISDIR)
1358 error("directory not empty: %s", path.buf);
1359 else
1360 error("unable to move logfile %s to %s: %s",
1361 tmp.buf, path.buf,
1362 strerror(cb.true_errno));
1363 }
1364
1365 strbuf_release(&path);
1366 strbuf_release(&tmp);
1367 return ret;
1368 }
1369
1370 static int write_ref_to_lockfile(struct ref_lock *lock,
1371 const struct object_id *oid,
1372 int skip_oid_verification, struct strbuf *err);
1373 static int commit_ref_update(struct files_ref_store *refs,
1374 struct ref_lock *lock,
1375 const struct object_id *oid, const char *logmsg,
1376 struct strbuf *err);
1377
1378 /*
1379 * Emit a better error message than lockfile.c's
1380 * unable_to_lock_message() would in case there is a D/F conflict with
1381 * another existing reference. If there would be a conflict, emit an error
1382 * message and return false; otherwise, return true.
1383 *
1384 * Note that this function is not safe against all races with other
1385  * processes, and that's not its job. We'll emit a more verbose error on D/F
1386 * conflicts if we get past it into lock_ref_oid_basic().
1387 */
1388 static int refs_rename_ref_available(struct ref_store *refs,
1389 const char *old_refname,
1390 const char *new_refname)
1391 {
1392 struct string_list skip = STRING_LIST_INIT_NODUP;
1393 struct strbuf err = STRBUF_INIT;
1394 int ok;
1395
1396 string_list_insert(&skip, old_refname);
1397 ok = !refs_verify_refname_available(refs, new_refname,
1398 NULL, &skip, &err);
1399 if (!ok)
1400 error("%s", err.buf);
1401
1402 string_list_clear(&skip, 0);
1403 strbuf_release(&err);
1404 return ok;
1405 }
1406
1407 static int files_copy_or_rename_ref(struct ref_store *ref_store,
1408 const char *oldrefname, const char *newrefname,
1409 const char *logmsg, int copy)
1410 {
1411 struct files_ref_store *refs =
1412 files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1413 struct object_id orig_oid;
1414 int flag = 0, logmoved = 0;
1415 struct ref_lock *lock;
1416 struct stat loginfo;
1417 struct strbuf sb_oldref = STRBUF_INIT;
1418 struct strbuf sb_newref = STRBUF_INIT;
1419 struct strbuf tmp_renamed_log = STRBUF_INIT;
1420 int log, ret;
1421 struct strbuf err = STRBUF_INIT;
1422
1423 files_reflog_path(refs, &sb_oldref, oldrefname);
1424 files_reflog_path(refs, &sb_newref, newrefname);
1425 files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);
1426
1427 log = !lstat(sb_oldref.buf, &loginfo);
1428 if (log && S_ISLNK(loginfo.st_mode)) {
1429 ret = error("reflog for %s is a symlink", oldrefname);
1430 goto out;
1431 }
1432
1433 if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
1434 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1435 &orig_oid, &flag)) {
1436 ret = error("refname %s not found", oldrefname);
1437 goto out;
1438 }
1439
1440 if (flag & REF_ISSYMREF) {
1441 if (copy)
1442 ret = error("refname %s is a symbolic ref, copying it is not supported",
1443 oldrefname);
1444 else
1445 ret = error("refname %s is a symbolic ref, renaming it is not supported",
1446 oldrefname);
1447 goto out;
1448 }
1449 if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
1450 ret = 1;
1451 goto out;
1452 }
1453
1454 if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
1455 ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1456 oldrefname, strerror(errno));
1457 goto out;
1458 }
1459
1460 if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) {
1461 ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1462 oldrefname, strerror(errno));
1463 goto out;
1464 }
1465
1466 if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
1467 &orig_oid, REF_NO_DEREF)) {
1468 error("unable to delete old %s", oldrefname);
1469 goto rollback;
1470 }
1471
1472 /*
1473 * Since we are doing a shallow lookup, oid is not the
1474 * correct value to pass to delete_ref as old_oid. But that
1475 * doesn't matter, because an old_oid check wouldn't add to
1476 * the safety anyway; we want to delete the reference whatever
1477 * its current value.
1478 */
1479 if (!copy && refs_resolve_ref_unsafe(&refs->base, newrefname,
1480 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1481 NULL, NULL) &&
1482 refs_delete_ref(&refs->base, NULL, newrefname,
1483 NULL, REF_NO_DEREF)) {
1484 if (errno == EISDIR) {
1485 struct strbuf path = STRBUF_INIT;
1486 int result;
1487
1488 files_ref_path(refs, &path, newrefname);
1489 result = remove_empty_directories(&path);
1490 strbuf_release(&path);
1491
1492 if (result) {
1493 error("Directory not empty: %s", newrefname);
1494 goto rollback;
1495 }
1496 } else {
1497 error("unable to delete existing %s", newrefname);
1498 goto rollback;
1499 }
1500 }
1501
1502 if (log && rename_tmp_log(refs, newrefname))
1503 goto rollback;
1504
1505 logmoved = log;
1506
1507 lock = lock_ref_oid_basic(refs, newrefname, &err);
1508 if (!lock) {
1509 if (copy)
1510 error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1511 else
1512 error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1513 strbuf_release(&err);
1514 goto rollback;
1515 }
1516 oidcpy(&lock->old_oid, &orig_oid);
1517
1518 if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
1519 commit_ref_update(refs, lock, &orig_oid, logmsg, &err)) {
1520 error("unable to write current sha1 into %s: %s", newrefname, err.buf);
1521 strbuf_release(&err);
1522 goto rollback;
1523 }
1524
1525 ret = 0;
1526 goto out;
1527
1528 rollback:
1529 lock = lock_ref_oid_basic(refs, oldrefname, &err);
1530 if (!lock) {
1531 error("unable to lock %s for rollback: %s", oldrefname, err.buf);
1532 strbuf_release(&err);
1533 goto rollbacklog;
1534 }
1535
1536 flag = log_all_ref_updates;
1537 log_all_ref_updates = LOG_REFS_NONE;
1538 if (write_ref_to_lockfile(lock, &orig_oid, 0, &err) ||
1539 commit_ref_update(refs, lock, &orig_oid, NULL, &err)) {
1540 error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
1541 strbuf_release(&err);
1542 }
1543 log_all_ref_updates = flag;
1544
1545 rollbacklog:
1546 if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
1547 error("unable to restore logfile %s from %s: %s",
1548 oldrefname, newrefname, strerror(errno));
1549 if (!logmoved && log &&
1550 rename(tmp_renamed_log.buf, sb_oldref.buf))
1551 error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
1552 oldrefname, strerror(errno));
1553 ret = 1;
1554 out:
1555 strbuf_release(&sb_newref);
1556 strbuf_release(&sb_oldref);
1557 strbuf_release(&tmp_renamed_log);
1558
1559 return ret;
1560 }
1561
1562 static int files_rename_ref(struct ref_store *ref_store,
1563 const char *oldrefname, const char *newrefname,
1564 const char *logmsg)
1565 {
1566 return files_copy_or_rename_ref(ref_store, oldrefname,
1567 newrefname, logmsg, 0);
1568 }
1569
1570 static int files_copy_ref(struct ref_store *ref_store,
1571 const char *oldrefname, const char *newrefname,
1572 const char *logmsg)
1573 {
1574 return files_copy_or_rename_ref(ref_store, oldrefname,
1575 newrefname, logmsg, 1);
1576 }
1577
1578 static int close_ref_gently(struct ref_lock *lock)
1579 {
1580 if (close_lock_file_gently(&lock->lk))
1581 return -1;
1582 return 0;
1583 }
1584
1585 static int commit_ref(struct ref_lock *lock)
1586 {
1587 char *path = get_locked_file_path(&lock->lk);
1588 struct stat st;
1589
1590 if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
1591 /*
1592 * There is a directory at the path we want to rename
1593 * the lockfile to. Hopefully it is empty; try to
1594 * delete it.
1595 */
1596 size_t len = strlen(path);
1597 struct strbuf sb_path = STRBUF_INIT;
1598
1599 strbuf_attach(&sb_path, path, len, len);
1600
1601 /*
1602 * If this fails, commit_lock_file() will also fail
1603 * and will report the problem.
1604 */
1605 remove_empty_directories(&sb_path);
1606 strbuf_release(&sb_path);
1607 } else {
1608 free(path);
1609 }
1610
1611 if (commit_lock_file(&lock->lk))
1612 return -1;
1613 return 0;
1614 }
1615
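/*
 * A create_file_fn callback that opens (creating if necessary) the
 * reflog at path for appending, storing the file descriptor in *cb.
 */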
1616 static int open_or_create_logfile(const char *path, void *cb)
1617 {
1618 int *fd = cb;
1619
1620 *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
1621 return (*fd < 0) ? -1 : 0;
1622 }
1623
1624 /*
1625 * Create a reflog for a ref. If force_create = 0, only create the
1626 * reflog for certain refs (those for which should_autocreate_reflog
1627 * returns non-zero). Otherwise, create it regardless of the reference
1628 * name. If the logfile already existed or was created, return 0 and
1629 * set *logfd to the file descriptor opened for appending to the file.
1630 * If no logfile exists and we decided not to create one, return 0 and
1631 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
1632 * return -1.
1633 */
1634 static int log_ref_setup(struct files_ref_store *refs,
1635 const char *refname, int force_create,
1636 int *logfd, struct strbuf *err)
1637 {
1638 struct strbuf logfile_sb = STRBUF_INIT;
1639 char *logfile;
1640
1641 files_reflog_path(refs, &logfile_sb, refname);
1642 logfile = strbuf_detach(&logfile_sb, NULL);
1643
1644 if (force_create || should_autocreate_reflog(refname)) {
1645 if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
1646 if (errno == ENOENT)
1647 strbuf_addf(err, "unable to create directory for '%s': "
1648 "%s", logfile, strerror(errno));
1649 else if (errno == EISDIR)
1650 strbuf_addf(err, "there are still logs under '%s'",
1651 logfile);
1652 else
1653 strbuf_addf(err, "unable to append to '%s': %s",
1654 logfile, strerror(errno));
1655
1656 goto error;
1657 }
1658 } else {
1659 *logfd = open(logfile, O_APPEND | O_WRONLY);
1660 if (*logfd < 0) {
1661 if (errno == ENOENT || errno == EISDIR) {
1662 /*
1663 * The logfile doesn't already exist,
1664 * but that is not an error; it only
1665 * means that we won't write log
1666 * entries to it.
1667 */
1668 ;
1669 } else {
1670 strbuf_addf(err, "unable to append to '%s': %s",
1671 logfile, strerror(errno));
1672 goto error;
1673 }
1674 }
1675 }
1676
1677 if (*logfd >= 0)
1678 adjust_shared_perm(logfile);
1679
1680 free(logfile);
1681 return 0;
1682
1683 error:
1684 free(logfile);
1685 return -1;
1686 }
1687
1688 static int files_create_reflog(struct ref_store *ref_store, const char *refname,
1689 struct strbuf *err)
1690 {
1691 struct files_ref_store *refs =
1692 files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1693 int fd;
1694
1695 if (log_ref_setup(refs, refname, 1, &fd, err))
1696 return -1;
1697
1698 if (fd >= 0)
1699 close(fd);
1700
1701 return 0;
1702 }
1703
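/*
 * Append one reflog entry to the already-open file descriptor fd. Each
 * entry is a single line of the form
 *
 *     old-oid SP new-oid SP committer-ident [TAB msg] LF
 *
 * where the committer ident (as produced by git_committer_info())
 * already carries the timestamp and timezone, e.g.
 * "A U Thor <author@example.com> 1257894000 +0100".
 */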
1704 static int log_ref_write_fd(int fd, const struct object_id *old_oid,
1705 const struct object_id *new_oid,
1706 const char *committer, const char *msg)
1707 {
1708 struct strbuf sb = STRBUF_INIT;
1709 int ret = 0;
1710
1711 strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer);
1712 if (msg && *msg) {
1713 strbuf_addch(&sb, '\t');
1714 strbuf_addstr(&sb, msg);
1715 }
1716 strbuf_addch(&sb, '\n');
1717 if (write_in_full(fd, sb.buf, sb.len) < 0)
1718 ret = -1;
1719 strbuf_release(&sb);
1720 return ret;
1721 }
1722
1723 static int files_log_ref_write(struct files_ref_store *refs,
1724 const char *refname, const struct object_id *old_oid,
1725 const struct object_id *new_oid, const char *msg,
1726 int flags, struct strbuf *err)
1727 {
1728 int logfd, result;
1729
1730 if (log_all_ref_updates == LOG_REFS_UNSET)
1731 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
1732
1733 result = log_ref_setup(refs, refname,
1734 flags & REF_FORCE_CREATE_REFLOG,
1735 &logfd, err);
1736
1737 if (result)
1738 return result;
1739
1740 if (logfd < 0)
1741 return 0;
1742 result = log_ref_write_fd(logfd, old_oid, new_oid,
1743 git_committer_info(0), msg);
1744 if (result) {
1745 struct strbuf sb = STRBUF_INIT;
1746 int save_errno = errno;
1747
1748 files_reflog_path(refs, &sb, refname);
1749 strbuf_addf(err, "unable to append to '%s': %s",
1750 sb.buf, strerror(save_errno));
1751 strbuf_release(&sb);
1752 close(logfd);
1753 return -1;
1754 }
1755 if (close(logfd)) {
1756 struct strbuf sb = STRBUF_INIT;
1757 int save_errno = errno;
1758
1759 files_reflog_path(refs, &sb, refname);
1760 strbuf_addf(err, "unable to append to '%s': %s",
1761 sb.buf, strerror(save_errno));
1762 strbuf_release(&sb);
1763 return -1;
1764 }
1765 return 0;
1766 }
1767
1768 /*
1769 * Write oid into the open lockfile, then close the lockfile. On
1770 * errors, rollback the lockfile, fill in *err and return -1.
1771 */
1772 static int write_ref_to_lockfile(struct ref_lock *lock,
1773 const struct object_id *oid,
1774 int skip_oid_verification, struct strbuf *err)
1775 {
1776 static char term = '\n';
1777 struct object *o;
1778 int fd;
1779
1780 if (!skip_oid_verification) {
1781 o = parse_object(the_repository, oid);
1782 if (!o) {
1783 strbuf_addf(
1784 err,
1785 "trying to write ref '%s' with nonexistent object %s",
1786 lock->ref_name, oid_to_hex(oid));
1787 unlock_ref(lock);
1788 return -1;
1789 }
1790 if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
1791 strbuf_addf(
1792 err,
1793 "trying to write non-commit object %s to branch '%s'",
1794 oid_to_hex(oid), lock->ref_name);
1795 unlock_ref(lock);
1796 return -1;
1797 }
1798 }
1799 fd = get_lock_file_fd(&lock->lk);
1800 if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
1801 write_in_full(fd, &term, 1) < 0 ||
1802 fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 ||
1803 close_ref_gently(lock) < 0) {
1804 strbuf_addf(err,
1805 "couldn't write '%s'", get_lock_file_path(&lock->lk));
1806 unlock_ref(lock);
1807 return -1;
1808 }
1809 return 0;
1810 }
1811
1812 /*
1813 * Commit a change to a loose reference that has already been written
1814 * to the loose reference lockfile. Also update the reflogs if
1815  * necessary, using the specified logmsg (which can be NULL).
1816 */
1817 static int commit_ref_update(struct files_ref_store *refs,
1818 struct ref_lock *lock,
1819 const struct object_id *oid, const char *logmsg,
1820 struct strbuf *err)
1821 {
1822 files_assert_main_repository(refs, "commit_ref_update");
1823
1824 clear_loose_ref_cache(refs);
1825 if (files_log_ref_write(refs, lock->ref_name,
1826 &lock->old_oid, oid,
1827 logmsg, 0, err)) {
1828 char *old_msg = strbuf_detach(err, NULL);
1829 strbuf_addf(err, "cannot update the ref '%s': %s",
1830 lock->ref_name, old_msg);
1831 free(old_msg);
1832 unlock_ref(lock);
1833 return -1;
1834 }
1835
1836 if (strcmp(lock->ref_name, "HEAD") != 0) {
1837 /*
1838 * Special hack: If a branch is updated directly and HEAD
1839 * points to it (may happen on the remote side of a push
1840 * for example) then logically the HEAD reflog should be
1841 * updated too.
1842 * A generic solution implies reverse symref information,
1843 * but finding all symrefs pointing to the given branch
1844 * would be rather costly for this rare event (the direct
1845 * update of a branch) to be worth it. So let's cheat and
1846 * check with HEAD only which should cover 99% of all usage
1847 * scenarios (even 100% of the default ones).
1848 */
1849 int head_flag;
1850 const char *head_ref;
1851
1852 head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
1853 RESOLVE_REF_READING,
1854 NULL, &head_flag);
1855 if (head_ref && (head_flag & REF_ISSYMREF) &&
1856 !strcmp(head_ref, lock->ref_name)) {
1857 struct strbuf log_err = STRBUF_INIT;
1858 if (files_log_ref_write(refs, "HEAD",
1859 &lock->old_oid, oid,
1860 logmsg, 0, &log_err)) {
1861 error("%s", log_err.buf);
1862 strbuf_release(&log_err);
1863 }
1864 }
1865 }
1866
1867 if (commit_ref(lock)) {
1868 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
1869 unlock_ref(lock);
1870 return -1;
1871 }
1872
1873 unlock_ref(lock);
1874 return 0;
1875 }
1876
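/*
 * Try to replace the locked ref file with a symbolic link pointing at
 * target (only on platforms where NO_SYMLINK_HEAD is not defined).
 * Return 0 on success, non-zero if the caller should fall back to a
 * textual symref.
 */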
1877 static int create_ref_symlink(struct ref_lock *lock, const char *target)
1878 {
1879 int ret = -1;
1880 #ifndef NO_SYMLINK_HEAD
1881 char *ref_path = get_locked_file_path(&lock->lk);
1882 unlink(ref_path);
1883 ret = symlink(target, ref_path);
1884 free(ref_path);
1885
1886 if (ret)
1887 fprintf(stderr, "no symlink - falling back to symbolic ref\n");
1888 #endif
1889 return ret;
1890 }
1891
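/*
 * Write a reflog entry for the symref update, but only if a log
 * message was supplied and target currently resolves to an object.
 */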
1892 static void update_symref_reflog(struct files_ref_store *refs,
1893 struct ref_lock *lock, const char *refname,
1894 const char *target, const char *logmsg)
1895 {
1896 struct strbuf err = STRBUF_INIT;
1897 struct object_id new_oid;
1898
1899 if (logmsg &&
1900 refs_resolve_ref_unsafe(&refs->base, target,
1901 RESOLVE_REF_READING, &new_oid, NULL) &&
1902 files_log_ref_write(refs, refname, &lock->old_oid,
1903 &new_oid, logmsg, 0, &err)) {
1904 error("%s", err.buf);
1905 strbuf_release(&err);
1906 }
1907 }
1908
1909 static int create_symref_locked(struct files_ref_store *refs,
1910 struct ref_lock *lock, const char *refname,
1911 const char *target, const char *logmsg)
1912 {
1913 if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
1914 update_symref_reflog(refs, lock, refname, target, logmsg);
1915 return 0;
1916 }
1917
1918 if (!fdopen_lock_file(&lock->lk, "w"))
1919 return error("unable to fdopen %s: %s",
1920 get_lock_file_path(&lock->lk), strerror(errno));
1921
1922 update_symref_reflog(refs, lock, refname, target, logmsg);
1923
1924 /* no error check; commit_ref will check ferror */
1925 fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target);
1926 if (commit_ref(lock) < 0)
1927 return error("unable to write symref for %s: %s", refname,
1928 strerror(errno));
1929 return 0;
1930 }
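
/*
 * Illustrative only: the symref written above is stored as a single
 * "ref:" line, e.g. for HEAD pointing at a hypothetical branch "main":
 *
 *   $ cat .git/HEAD
 *   ref: refs/heads/main
 *
 * With core.preferSymlinkRefs, create_ref_symlink() above expresses
 * the same relationship as a filesystem symlink instead.
 */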
1931
1932 static int files_create_symref(struct ref_store *ref_store,
1933 const char *refname, const char *target,
1934 const char *logmsg)
1935 {
1936 struct files_ref_store *refs =
1937 files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1938 struct strbuf err = STRBUF_INIT;
1939 struct ref_lock *lock;
1940 int ret;
1941
1942 lock = lock_ref_oid_basic(refs, refname, &err);
1943 if (!lock) {
1944 error("%s", err.buf);
1945 strbuf_release(&err);
1946 return -1;
1947 }
1948
1949 ret = create_symref_locked(refs, lock, refname, target, logmsg);
1950 unlock_ref(lock);
1951 return ret;
1952 }
1953
1954 static int files_reflog_exists(struct ref_store *ref_store,
1955 const char *refname)
1956 {
1957 struct files_ref_store *refs =
1958 files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1959 struct strbuf sb = STRBUF_INIT;
1960 struct stat st;
1961 int ret;
1962
1963 files_reflog_path(refs, &sb, refname);
1964 ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
1965 strbuf_release(&sb);
1966 return ret;
1967 }
1968
1969 static int files_delete_reflog(struct ref_store *ref_store,
1970 const char *refname)
1971 {
1972 struct files_ref_store *refs =
1973 files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1974 struct strbuf sb = STRBUF_INIT;
1975 int ret;
1976
1977 files_reflog_path(refs, &sb, refname);
1978 ret = remove_path(sb.buf);
1979 strbuf_release(&sb);
1980 return ret;
1981 }
1982
1983 static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
1984 {
1985 struct object_id ooid, noid;
1986 char *email_end, *message;
1987 timestamp_t timestamp;
1988 int tz;
1989 const char *p = sb->buf;
1990
1991 /* old SP new SP name <email> SP time SP tz TAB msg LF */
1992 if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
1993 parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
1994 parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
1995 !(email_end = strchr(p, '>')) ||
1996 email_end[1] != ' ' ||
1997 !(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
1998 !message || message[0] != ' ' ||
1999 (message[1] != '+' && message[1] != '-') ||
2000 !isdigit(message[2]) || !isdigit(message[3]) ||
2001 !isdigit(message[4]) || !isdigit(message[5]))
2002 return 0; /* corrupt? */
2003 email_end[1] = '\0';
2004 tz = strtol(message + 1, NULL, 10);
2005 if (message[6] != '\t')
2006 message += 6;
2007 else
2008 message += 7;
2009 return fn(&ooid, &noid, p, timestamp, tz, message, cb_data);
2010 }
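
/*
 * Illustrative only: a well-formed entry in the format parsed above
 * (with <TAB> standing for the literal tab that precedes the message)
 * might look like this, with the object names abbreviated:
 *
 *   3f7868... 89e6c9... A U Thor <author@example.com> 1112911993 -0700<TAB>commit: example
 *
 * i.e. old and new object names, the committer ident, an epoch
 * timestamp, a timezone offset, and the log message.
 */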
2011
2012 static char *find_beginning_of_line(char *bob, char *scan)
2013 {
2014 while (bob < scan && *(--scan) != '\n')
2015 ; /* keep scanning backwards */
2016 /*
2017 * Return either beginning of the buffer, or LF at the end of
2018 * the previous line.
2019 */
2020 return scan;
2021 }
2022
2023 static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
2024 const char *refname,
2025 each_reflog_ent_fn fn,
2026 void *cb_data)
2027 {
2028 struct files_ref_store *refs =
2029 files_downcast(ref_store, REF_STORE_READ,
2030 "for_each_reflog_ent_reverse");
2031 struct strbuf sb = STRBUF_INIT;
2032 FILE *logfp;
2033 long pos;
2034 int ret = 0, at_tail = 1;
2035
2036 files_reflog_path(refs, &sb, refname);
2037 logfp = fopen(sb.buf, "r");
2038 strbuf_release(&sb);
2039 if (!logfp)
2040 return -1;
2041
2042 /* Jump to the end */
2043 if (fseek(logfp, 0, SEEK_END) < 0)
2044 ret = error("cannot seek back reflog for %s: %s",
2045 refname, strerror(errno));
2046 pos = ftell(logfp);
2047 while (!ret && 0 < pos) {
2048 int cnt;
2049 size_t nread;
2050 char buf[BUFSIZ];
2051 char *endp, *scanp;
2052
2053 /* Fill next block from the end */
2054 cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
2055 if (fseek(logfp, pos - cnt, SEEK_SET)) {
2056 ret = error("cannot seek back reflog for %s: %s",
2057 refname, strerror(errno));
2058 break;
2059 }
2060 nread = fread(buf, cnt, 1, logfp);
2061 if (nread != 1) {
2062 ret = error("cannot read %d bytes from reflog for %s: %s",
2063 cnt, refname, strerror(errno));
2064 break;
2065 }
2066 pos -= cnt;
2067
2068 scanp = endp = buf + cnt;
2069 if (at_tail && scanp[-1] == '\n')
2070 /* Looking at the final LF at the end of the file */
2071 scanp--;
2072 at_tail = 0;
2073
2074 while (buf < scanp) {
2075 /*
2076 * bp will point at the terminating LF of the previous
2077 * line, or at the beginning of the buffer.
2078 */
2079 char *bp;
2080
2081 bp = find_beginning_of_line(buf, scanp);
2082
2083 if (*bp == '\n') {
2084 /*
2085 * The newline is the end of the previous line,
2086 * so we know we have a complete line starting
2087 * at (bp + 1). Prefix it onto any prior data
2088 * we collected for the line and process it.
2089 */
2090 strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
2091 scanp = bp;
2092 endp = bp + 1;
2093 ret = show_one_reflog_ent(&sb, fn, cb_data);
2094 strbuf_reset(&sb);
2095 if (ret)
2096 break;
2097 } else if (!pos) {
2098 /*
2099 * We are at the start of the buffer, and the
2100 * start of the file; there is no previous
2101 * line, and we have everything for this one.
2102 * Process it, and we can end the loop.
2103 */
2104 strbuf_splice(&sb, 0, 0, buf, endp - buf);
2105 ret = show_one_reflog_ent(&sb, fn, cb_data);
2106 strbuf_reset(&sb);
2107 break;
2108 }
2109
2110 if (bp == buf) {
2111 /*
2112 * We are at the start of the buffer, and there
2113 * is more file to read backwards, which means
2114 * we are in the middle of a line. Note that we
2115 * may get here even if *bp was a newline; that
2116 * just means we are at the exact end of the
2117 * previous line, rather than some spot in the
2118 * middle.
2119 *
2120 * Save away what we have to be combined with
2121 * the data from the next read.
2122 */
2123 strbuf_splice(&sb, 0, 0, buf, endp - buf);
2124 break;
2125 }
2126 }
2127
2128 }
2129 if (!ret && sb.len)
2130 BUG("reverse reflog parser had leftover data");
2131
2132 fclose(logfp);
2133 strbuf_release(&sb);
2134 return ret;
2135 }
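
/*
 * Illustrative only: a sketch of how a line straddling two reads is
 * reassembled above. Pretend the block size were 8 bytes and the file
 * ended with "xx\nABCDEFGH\n". The tail block read is "BCDEFGH\n";
 * the scan starts just before its final LF, finds no other newline in
 * the block, so the whole block is stashed in `sb`. The next block is
 * "xx\nA"; the "A" after its LF is spliced in front of the stashed
 * data, and only then is the completed line "ABCDEFGH\n" passed to
 * show_one_reflog_ent().
 */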
2136
2137 static int files_for_each_reflog_ent(struct ref_store *ref_store,
2138 const char *refname,
2139 each_reflog_ent_fn fn, void *cb_data)
2140 {
2141 struct files_ref_store *refs =
2142 files_downcast(ref_store, REF_STORE_READ,
2143 "for_each_reflog_ent");
2144 FILE *logfp;
2145 struct strbuf sb = STRBUF_INIT;
2146 int ret = 0;
2147
2148 files_reflog_path(refs, &sb, refname);
2149 logfp = fopen(sb.buf, "r");
2150 strbuf_release(&sb);
2151 if (!logfp)
2152 return -1;
2153
2154 while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
2155 ret = show_one_reflog_ent(&sb, fn, cb_data);
2156 fclose(logfp);
2157 strbuf_release(&sb);
2158 return ret;
2159 }
2160
2161 struct files_reflog_iterator {
2162 struct ref_iterator base;
2163
2164 struct ref_store *ref_store;
2165 struct dir_iterator *dir_iterator;
2166 struct object_id oid;
2167 };
2168
2169 static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
2170 {
2171 struct files_reflog_iterator *iter =
2172 (struct files_reflog_iterator *)ref_iterator;
2173 struct dir_iterator *diter = iter->dir_iterator;
2174 int ok;
2175
2176 while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
2177 int flags;
2178
2179 if (!S_ISREG(diter->st.st_mode))
2180 continue;
2181 if (diter->basename[0] == '.')
2182 continue;
2183 if (ends_with(diter->basename, ".lock"))
2184 continue;
2185
2186 if (!refs_resolve_ref_unsafe(iter->ref_store,
2187 diter->relative_path, 0,
2188 &iter->oid, &flags)) {
2189 error("bad ref for %s", diter->path.buf);
2190 continue;
2191 }
2192
2193 iter->base.refname = diter->relative_path;
2194 iter->base.oid = &iter->oid;
2195 iter->base.flags = flags;
2196 return ITER_OK;
2197 }
2198
2199 iter->dir_iterator = NULL;
2200 if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
2201 ok = ITER_ERROR;
2202 return ok;
2203 }
2204
2205 static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
2206 struct object_id *peeled)
2207 {
2208 BUG("ref_iterator_peel() called for reflog_iterator");
2209 }
2210
2211 static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
2212 {
2213 struct files_reflog_iterator *iter =
2214 (struct files_reflog_iterator *)ref_iterator;
2215 int ok = ITER_DONE;
2216
2217 if (iter->dir_iterator)
2218 ok = dir_iterator_abort(iter->dir_iterator);
2219
2220 base_ref_iterator_free(ref_iterator);
2221 return ok;
2222 }
2223
2224 static struct ref_iterator_vtable files_reflog_iterator_vtable = {
2225 .advance = files_reflog_iterator_advance,
2226 .peel = files_reflog_iterator_peel,
2227 .abort = files_reflog_iterator_abort,
2228 };
2229
2230 static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
2231 const char *gitdir)
2232 {
2233 struct dir_iterator *diter;
2234 struct files_reflog_iterator *iter;
2235 struct ref_iterator *ref_iterator;
2236 struct strbuf sb = STRBUF_INIT;
2237
2238 strbuf_addf(&sb, "%s/logs", gitdir);
2239
2240 diter = dir_iterator_begin(sb.buf, 0);
2241 if (!diter) {
2242 strbuf_release(&sb);
2243 return empty_ref_iterator_begin();
2244 }
2245
2246 CALLOC_ARRAY(iter, 1);
2247 ref_iterator = &iter->base;
2248
2249 base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
2250 iter->dir_iterator = diter;
2251 iter->ref_store = ref_store;
2252 strbuf_release(&sb);
2253
2254 return ref_iterator;
2255 }
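
/*
 * Illustrative only: a rough sketch of how a caller might consume the
 * iterator returned above. Once ref_iterator_advance() returns
 * ITER_DONE or ITER_ERROR, the iterator has already freed itself:
 *
 *	struct ref_iterator *iter = reflog_iterator_begin(store, gitdir);
 *	int ok;
 *
 *	while ((ok = ref_iterator_advance(iter)) == ITER_OK)
 *		printf("%s %s\n", oid_to_hex(iter->oid), iter->refname);
 *	if (ok != ITER_DONE)
 *		die("failed to iterate over reflogs");
 */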
2256
2257 static enum iterator_selection reflog_iterator_select(
2258 struct ref_iterator *iter_worktree,
2259 struct ref_iterator *iter_common,
2260 void *cb_data)
2261 {
2262 if (iter_worktree) {
2263 /*
2264 * We're a bit loose here. We probably should ignore
2265 * common refs if they are accidentally added as
2266 * per-worktree refs.
2267 */
2268 return ITER_SELECT_0;
2269 } else if (iter_common) {
2270 if (ref_type(iter_common->refname) == REF_TYPE_NORMAL)
2271 return ITER_SELECT_1;
2272
2273 /*
2274 * The main ref store may contain the main worktree's
2275 * per-worktree refs, which should be ignored.
2276 */
2277 return ITER_SKIP_1;
2278 } else
2279 return ITER_DONE;
2280 }
2281
2282 static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
2283 {
2284 struct files_ref_store *refs =
2285 files_downcast(ref_store, REF_STORE_READ,
2286 "reflog_iterator_begin");
2287
2288 if (!strcmp(refs->base.gitdir, refs->gitcommondir)) {
2289 return reflog_iterator_begin(ref_store, refs->gitcommondir);
2290 } else {
2291 return merge_ref_iterator_begin(
2292 0, reflog_iterator_begin(ref_store, refs->base.gitdir),
2293 reflog_iterator_begin(ref_store, refs->gitcommondir),
2294 reflog_iterator_select, refs);
2295 }
2296 }
2297
2298 /*
2299 * If update is a direct update of head_ref (the reference pointed to
2300 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
2301 */
2302 static int split_head_update(struct ref_update *update,
2303 struct ref_transaction *transaction,
2304 const char *head_ref,
2305 struct string_list *affected_refnames,
2306 struct strbuf *err)
2307 {
2308 struct string_list_item *item;
2309 struct ref_update *new_update;
2310
2311 if ((update->flags & REF_LOG_ONLY) ||
2312 (update->flags & REF_IS_PRUNING) ||
2313 (update->flags & REF_UPDATE_VIA_HEAD))
2314 return 0;
2315
2316 if (strcmp(update->refname, head_ref))
2317 return 0;
2318
2319 /*
2320 * First make sure that HEAD is not already in the
2321 * transaction. This check is O(lg N) in the transaction
2322 * size, but it happens at most once per transaction.
2323 */
2324 if (string_list_has_string(affected_refnames, "HEAD")) {
2325 /* An entry already existed */
2326 strbuf_addf(err,
2327 "multiple updates for 'HEAD' (including one "
2328 "via its referent '%s') are not allowed",
2329 update->refname);
2330 return TRANSACTION_NAME_CONFLICT;
2331 }
2332
2333 new_update = ref_transaction_add_update(
2334 transaction, "HEAD",
2335 update->flags | REF_LOG_ONLY | REF_NO_DEREF,
2336 &update->new_oid, &update->old_oid,
2337 update->msg);
2338
2339 /*
2340 * Add "HEAD". This insertion is O(N) in the transaction
2341 * size, but it happens at most once per transaction.
2342 * Add new_update->refname instead of a literal "HEAD".
2343 */
2344 if (strcmp(new_update->refname, "HEAD"))
2345 BUG("%s unexpectedly not 'HEAD'", new_update->refname);
2346 item = string_list_insert(affected_refnames, new_update->refname);
2347 item->util = new_update;
2348
2349 return 0;
2350 }
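
/*
 * Illustrative only: if HEAD is a symref to refs/heads/main and a
 * transaction updates refs/heads/main directly (as happens on the
 * receiving side of a push), the function above adds a second queued
 * update:
 *
 *   refs/heads/main  - the original value update
 *   HEAD             - REF_LOG_ONLY | REF_NO_DEREF, reflog entry only
 *
 * so that HEAD's reflog still records the branch moving.
 */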
2351
2352 /*
2353 * update is for a symref that points at referent and doesn't have
2354 * REF_NO_DEREF set. Split it into two updates:
2355 * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set
2356 * - A new, separate update for the referent reference
2357 * Note that the new update will itself be subject to splitting when
2358 * the iteration gets to it.
2359 */
2360 static int split_symref_update(struct ref_update *update,
2361 const char *referent,
2362 struct ref_transaction *transaction,
2363 struct string_list *affected_refnames,
2364 struct strbuf *err)
2365 {
2366 struct string_list_item *item;
2367 struct ref_update *new_update;
2368 unsigned int new_flags;
2369
2370 /*
2371 * First make sure that referent is not already in the
2372 * transaction. This check is O(lg N) in the transaction
2373 * size, but it happens at most once per symref in a
2374 * transaction.
2375 */
2376 if (string_list_has_string(affected_refnames, referent)) {
2377 /* An entry already exists */
2378 strbuf_addf(err,
2379 "multiple updates for '%s' (including one "
2380 "via symref '%s') are not allowed",
2381 referent, update->refname);
2382 return TRANSACTION_NAME_CONFLICT;
2383 }
2384
2385 new_flags = update->flags;
2386 if (!strcmp(update->refname, "HEAD")) {
2387 /*
2388 * Record that the new update came via HEAD, so that
2389 * when we process it, split_head_update() doesn't try
2390 * to add another reflog update for HEAD. Note that
2391 * this bit will be propagated if the new_update
2392 * itself needs to be split.
2393 */
2394 new_flags |= REF_UPDATE_VIA_HEAD;
2395 }
2396
2397 new_update = ref_transaction_add_update(
2398 transaction, referent, new_flags,
2399 &update->new_oid, &update->old_oid,
2400 update->msg);
2401
2402 new_update->parent_update = update;
2403
2404 /*
2405 * Change the symbolic ref update to log only. Also, it
2406 * doesn't need to check its old OID value, as that will be
2407 * done when new_update is processed.
2408 */
2409 update->flags |= REF_LOG_ONLY | REF_NO_DEREF;
2410 update->flags &= ~REF_HAVE_OLD;
2411
2412 /*
2413 * Add the referent. This insertion is O(N) in the transaction
2414 * size, but it happens at most once per symref in a
2415 * transaction. Make sure to add new_update->refname, which will
2416 * be valid as long as affected_refnames is in use, and NOT
2417 * referent, which might soon be freed by our caller.
2418 */
2419 item = string_list_insert(affected_refnames, new_update->refname);
2420 if (item->util)
2421 BUG("%s unexpectedly found in affected_refnames",
2422 new_update->refname);
2423 item->util = new_update;
2424
2425 return 0;
2426 }
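
/*
 * Illustrative only: an update queued against "HEAD" (a symref to,
 * say, refs/heads/main) without REF_NO_DEREF is split by the function
 * above into:
 *
 *   HEAD             - REF_LOG_ONLY | REF_NO_DEREF, reflog entry only
 *   refs/heads/main  - the real value update, whose parent_update
 *                      points back at the HEAD update
 *
 * The second update is the one that locks and rewrites the loose ref
 * file; the first merely appends to HEAD's reflog.
 */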
2427
2428 /*
2429 * Return the refname under which update was originally requested.
2430 */
2431 static const char *original_update_refname(struct ref_update *update)
2432 {
2433 while (update->parent_update)
2434 update = update->parent_update;
2435
2436 return update->refname;
2437 }
2438
2439 /*
2440 * Check whether the REF_HAVE_OLD and old_oid values stored in update
2441 * are consistent with oid, which is the reference's current value. If
2442 * everything is OK, return 0; otherwise, write an error message to
2443 * err and return -1.
2444 */
2445 static int check_old_oid(struct ref_update *update, struct object_id *oid,
2446 struct strbuf *err)
2447 {
2448 if (!(update->flags & REF_HAVE_OLD) ||
2449 oideq(oid, &update->old_oid))
2450 return 0;
2451
2452 if (is_null_oid(&update->old_oid))
2453 strbuf_addf(err, "cannot lock ref '%s': "
2454 "reference already exists",
2455 original_update_refname(update));
2456 else if (is_null_oid(oid))
2457 strbuf_addf(err, "cannot lock ref '%s': "
2458 "reference is missing but expected %s",
2459 original_update_refname(update),
2460 oid_to_hex(&update->old_oid));
2461 else
2462 strbuf_addf(err, "cannot lock ref '%s': "
2463 "is at %s but expected %s",
2464 original_update_refname(update),
2465 oid_to_hex(oid),
2466 oid_to_hex(&update->old_oid));
2467
2468 return -1;
2469 }
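
/*
 * Illustrative only: a transaction that passes a null old_oid is
 * asserting "this ref must not exist yet", so any current value
 * produces the "reference already exists" message above; one that
 * passes a concrete old_oid is asserting "the ref must currently be
 * exactly this value", and a missing ref or a mismatch produces one
 * of the other two messages.
 */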
2470
2471 /*
2472 * Prepare for carrying out update:
2473 * - Lock the reference referred to by update.
2474 * - Read the reference under lock.
2475 * - Check that its old OID value (if specified) is correct, and in
2476 * any case record it in update->lock->old_oid for later use when
2477 * writing the reflog.
2478 * - If it is a symref update without REF_NO_DEREF, split it up into a
2479 * REF_LOG_ONLY update of the symref and add a separate update for
2480 * the referent to transaction.
2481 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2482 * update of HEAD.
2483 */
2484 static int lock_ref_for_update(struct files_ref_store *refs,
2485 struct ref_update *update,
2486 struct ref_transaction *transaction,
2487 const char *head_ref,
2488 struct string_list *affected_refnames,
2489 struct strbuf *err)
2490 {
2491 struct strbuf referent = STRBUF_INIT;
2492 int mustexist = (update->flags & REF_HAVE_OLD) &&
2493 !is_null_oid(&update->old_oid);
2494 int ret = 0;
2495 struct ref_lock *lock;
2496
2497 files_assert_main_repository(refs, "lock_ref_for_update");
2498
2499 if ((update->flags & REF_HAVE_NEW) && is_null_oid(&update->new_oid))
2500 update->flags |= REF_DELETING;
2501
2502 if (head_ref) {
2503 ret = split_head_update(update, transaction, head_ref,
2504 affected_refnames, err);
2505 if (ret)
2506 goto out;
2507 }
2508
2509 ret = lock_raw_ref(refs, update->refname, mustexist,
2510 affected_refnames,
2511 &lock, &referent,
2512 &update->type, err);
2513 if (ret) {
2514 char *reason;
2515
2516 reason = strbuf_detach(err, NULL);
2517 strbuf_addf(err, "cannot lock ref '%s': %s",
2518 original_update_refname(update), reason);
2519 free(reason);
2520 goto out;
2521 }
2522
2523 update->backend_data = lock;
2524
2525 if (update->type & REF_ISSYMREF) {
2526 if (update->flags & REF_NO_DEREF) {
2527 /*
2528 * We won't be reading the referent as part of
2529 * the transaction, so we have to read it here
2530 * to record and possibly check old_oid:
2531 */
2532 if (!refs_resolve_ref_unsafe(&refs->base,
2533 referent.buf, 0,
2534 &lock->old_oid, NULL)) {
2535 if (update->flags & REF_HAVE_OLD) {
2536 strbuf_addf(err, "cannot lock ref '%s': "
2537 "error reading reference",
2538 original_update_refname(update));
2539 ret = TRANSACTION_GENERIC_ERROR;
2540 goto out;
2541 }
2542 } else if (check_old_oid(update, &lock->old_oid, err)) {
2543 ret = TRANSACTION_GENERIC_ERROR;
2544 goto out;
2545 }
2546 } else {
2547 /*
2548 * Create a new update for the reference this
2549 * symref is pointing at. Also, we will record
2550 * and verify old_oid for this update as part
2551 * of processing the split-off update, so we
2552 * don't have to do it here.
2553 */
2554 ret = split_symref_update(update,
2555 referent.buf, transaction,
2556 affected_refnames, err);
2557 if (ret)
2558 goto out;
2559 }
2560 } else {
2561 struct ref_update *parent_update;
2562
2563 if (check_old_oid(update, &lock->old_oid, err)) {
2564 ret = TRANSACTION_GENERIC_ERROR;
2565 goto out;
2566 }
2567
2568 /*
2569 * If this update is happening indirectly because of a
2570 * symref update, record the old OID in the parent
2571 * update:
2572 */
2573 for (parent_update = update->parent_update;
2574 parent_update;
2575 parent_update = parent_update->parent_update) {
2576 struct ref_lock *parent_lock = parent_update->backend_data;
2577 oidcpy(&parent_lock->old_oid, &lock->old_oid);
2578 }
2579 }
2580
2581 if ((update->flags & REF_HAVE_NEW) &&
2582 !(update->flags & REF_DELETING) &&
2583 !(update->flags & REF_LOG_ONLY)) {
2584 if (!(update->type & REF_ISSYMREF) &&
2585 oideq(&lock->old_oid, &update->new_oid)) {
2586 /*
2587 * The reference already has the desired
2588 * value, so we don't need to write it.
2589 */
2590 } else if (write_ref_to_lockfile(
2591 lock, &update->new_oid,
2592 update->flags & REF_SKIP_OID_VERIFICATION,
2593 err)) {
2594 char *write_err = strbuf_detach(err, NULL);
2595
2596 /*
2597 * The lock was freed upon failure of
2598 * write_ref_to_lockfile():
2599 */
2600 update->backend_data = NULL;
2601 strbuf_addf(err,
2602 "cannot update ref '%s': %s",
2603 update->refname, write_err);
2604 free(write_err);
2605 ret = TRANSACTION_GENERIC_ERROR;
2606 goto out;
2607 } else {
2608 update->flags |= REF_NEEDS_COMMIT;
2609 }
2610 }
2611 if (!(update->flags & REF_NEEDS_COMMIT)) {
2612 /*
2613 * We didn't call write_ref_to_lockfile(), so
2614 * the lockfile is still open. Close it to
2615 * free up the file descriptor:
2616 */
2617 if (close_ref_gently(lock)) {
2618 strbuf_addf(err, "couldn't close '%s.lock'",
2619 update->refname);
2620 ret = TRANSACTION_GENERIC_ERROR;
2621 goto out;
2622 }
2623 }
2624
2625 out:
2626 strbuf_release(&referent);
2627 return ret;
2628 }
2629
2630 struct files_transaction_backend_data {
2631 struct ref_transaction *packed_transaction;
2632 int packed_refs_locked;
2633 };
2634
2635 /*
2636 * Unlock any references in `transaction` that are still locked, and
2637 * mark the transaction closed.
2638 */
2639 static void files_transaction_cleanup(struct files_ref_store *refs,
2640 struct ref_transaction *transaction)
2641 {
2642 size_t i;
2643 struct files_transaction_backend_data *backend_data =
2644 transaction->backend_data;
2645 struct strbuf err = STRBUF_INIT;
2646
2647 for (i = 0; i < transaction->nr; i++) {
2648 struct ref_update *update = transaction->updates[i];
2649 struct ref_lock *lock = update->backend_data;
2650
2651 if (lock) {
2652 unlock_ref(lock);
2653 update->backend_data = NULL;
2654 }
2655 }
2656
2657 if (backend_data) {
2658 if (backend_data->packed_transaction &&
2659 ref_transaction_abort(backend_data->packed_transaction, &err)) {
2660 error("error aborting transaction: %s", err.buf);
2661 strbuf_release(&err);
2662 }
2663
2664 if (backend_data->packed_refs_locked)
2665 packed_refs_unlock(refs->packed_ref_store);
2666
2667 free(backend_data);
2668 }
2669
2670 transaction->state = REF_TRANSACTION_CLOSED;
2671 }
2672
2673 static int files_transaction_prepare(struct ref_store *ref_store,
2674 struct ref_transaction *transaction,
2675 struct strbuf *err)
2676 {
2677 struct files_ref_store *refs =
2678 files_downcast(ref_store, REF_STORE_WRITE,
2679 "ref_transaction_prepare");
2680 size_t i;
2681 int ret = 0;
2682 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
2683 char *head_ref = NULL;
2684 int head_type;
2685 struct files_transaction_backend_data *backend_data;
2686 struct ref_transaction *packed_transaction = NULL;
2687
2688 assert(err);
2689
2690 if (!transaction->nr)
2691 goto cleanup;
2692
2693 CALLOC_ARRAY(backend_data, 1);
2694 transaction->backend_data = backend_data;
2695
2696 /*
2697 * Fail if a refname appears more than once in the
2698 * transaction. (If we end up splitting up any updates using
2699 * split_symref_update() or split_head_update(), those
2700 * functions will check that the new updates don't have the
2701 * same refname as any existing ones.) Also fail if any of the
2702 * updates use REF_IS_PRUNING without REF_NO_DEREF.
2703 */
2704 for (i = 0; i < transaction->nr; i++) {
2705 struct ref_update *update = transaction->updates[i];
2706 struct string_list_item *item =
2707 string_list_append(&affected_refnames, update->refname);
2708
2709 if ((update->flags & REF_IS_PRUNING) &&
2710 !(update->flags & REF_NO_DEREF))
2711 BUG("REF_IS_PRUNING set without REF_NO_DEREF");
2712
2713 /*
2714 * We store a pointer to update in item->util, but at
2715 * the moment we never use the value of this field
2716 * except to check whether it is non-NULL.
2717 */
2718 item->util = update;
2719 }
2720 string_list_sort(&affected_refnames);
2721 if (ref_update_reject_duplicates(&affected_refnames, err)) {
2722 ret = TRANSACTION_GENERIC_ERROR;
2723 goto cleanup;
2724 }
2725
2726 /*
2727 * Special hack: If a branch is updated directly and HEAD
2728 * points to it (may happen on the remote side of a push
2729 * for example) then logically the HEAD reflog should be
2730 * updated too.
2731 *
2732 * A generic solution would require reverse symref lookups,
2733 * but finding all symrefs pointing to a given branch would be
2734 * rather costly for this rare event (the direct update of a
2735 * branch) to be worth it. So let's cheat and check with HEAD
2736 * only, which should cover 99% of all usage scenarios (even
2737 * 100% of the default ones).
2738 *
2739 * So if HEAD is a symbolic reference, then record the name of
2740 * the reference that it points to. If we see an update of
2741 * head_ref within the transaction, then split_head_update()
2742 * arranges for the reflog of HEAD to be updated, too.
2743 */
2744 head_ref = refs_resolve_refdup(ref_store, "HEAD",
2745 RESOLVE_REF_NO_RECURSE,
2746 NULL, &head_type);
2747
2748 if (head_ref && !(head_type & REF_ISSYMREF)) {
2749 FREE_AND_NULL(head_ref);
2750 }
2751
2752 /*
2753 * Acquire all locks, verify old values if provided, check
2754 * that new values are valid, and write new values to the
2755 * lockfiles, ready to be activated. Only keep one lockfile
2756 * open at a time to avoid running out of file descriptors.
2757 * Note that lock_ref_for_update() might append more updates
2758 * to the transaction.
2759 */
2760 for (i = 0; i < transaction->nr; i++) {
2761 struct ref_update *update = transaction->updates[i];
2762
2763 ret = lock_ref_for_update(refs, update, transaction,
2764 head_ref, &affected_refnames, err);
2765 if (ret)
2766 goto cleanup;
2767
2768 if (update->flags & REF_DELETING &&
2769 !(update->flags & REF_LOG_ONLY) &&
2770 !(update->flags & REF_IS_PRUNING)) {
2771 /*
2772 * This reference has to be deleted from
2773 * packed-refs if it exists there.
2774 */
2775 if (!packed_transaction) {
2776 packed_transaction = ref_store_transaction_begin(
2777 refs->packed_ref_store, err);
2778 if (!packed_transaction) {
2779 ret = TRANSACTION_GENERIC_ERROR;
2780 goto cleanup;
2781 }
2782
2783 backend_data->packed_transaction =
2784 packed_transaction;
2785 }
2786
2787 ref_transaction_add_update(
2788 packed_transaction, update->refname,
2789 REF_HAVE_NEW | REF_NO_DEREF,
2790 &update->new_oid, NULL,
2791 NULL);
2792 }
2793 }
2794
2795 if (packed_transaction) {
2796 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
2797 ret = TRANSACTION_GENERIC_ERROR;
2798 goto cleanup;
2799 }
2800 backend_data->packed_refs_locked = 1;
2801
2802 if (is_packed_transaction_needed(refs->packed_ref_store,
2803 packed_transaction)) {
2804 ret = ref_transaction_prepare(packed_transaction, err);
2805 /*
2806 * A failure during the prepare step will abort
2807 * itself, but not free. Do that now, and disconnect
2808 * from the files_transaction so it does not try to
2809 * abort us when we hit the cleanup code below.
2810 */
2811 if (ret) {
2812 ref_transaction_free(packed_transaction);
2813 backend_data->packed_transaction = NULL;
2814 }
2815 } else {
2816 /*
2817 * We can skip rewriting the `packed-refs`
2818 * file. But we do need to leave it locked, so
2819 * that somebody else doesn't pack a reference
2820 * that we are trying to delete.
2821 *
2822 * We need to disconnect our transaction from
2823 * backend_data, since the abort (whether successful or
2824 * not) will free it.
2825 */
2826 backend_data->packed_transaction = NULL;
2827 if (ref_transaction_abort(packed_transaction, err)) {
2828 ret = TRANSACTION_GENERIC_ERROR;
2829 goto cleanup;
2830 }
2831 }
2832 }
2833
2834 cleanup:
2835 free(head_ref);
2836 string_list_clear(&affected_refnames, 0);
2837
2838 if (ret)
2839 files_transaction_cleanup(refs, transaction);
2840 else
2841 transaction->state = REF_TRANSACTION_PREPARED;
2842
2843 return ret;
2844 }
2845
2846 static int files_transaction_finish(struct ref_store *ref_store,
2847 struct ref_transaction *transaction,
2848 struct strbuf *err)
2849 {
2850 struct files_ref_store *refs =
2851 files_downcast(ref_store, 0, "ref_transaction_finish");
2852 size_t i;
2853 int ret = 0;
2854 struct strbuf sb = STRBUF_INIT;
2855 struct files_transaction_backend_data *backend_data;
2856 struct ref_transaction *packed_transaction;
2857
2858
2859 assert(err);
2860
2861 if (!transaction->nr) {
2862 transaction->state = REF_TRANSACTION_CLOSED;
2863 return 0;
2864 }
2865
2866 backend_data = transaction->backend_data;
2867 packed_transaction = backend_data->packed_transaction;
2868
2869 /* Perform updates first so live commits remain referenced */
2870 for (i = 0; i < transaction->nr; i++) {
2871 struct ref_update *update = transaction->updates[i];
2872 struct ref_lock *lock = update->backend_data;
2873
2874 if (update->flags & REF_NEEDS_COMMIT ||
2875 update->flags & REF_LOG_ONLY) {
2876 if (files_log_ref_write(refs,
2877 lock->ref_name,
2878 &lock->old_oid,
2879 &update->new_oid,
2880 update->msg, update->flags,
2881 err)) {
2882 char *old_msg = strbuf_detach(err, NULL);
2883
2884 strbuf_addf(err, "cannot update the ref '%s': %s",
2885 lock->ref_name, old_msg);
2886 free(old_msg);
2887 unlock_ref(lock);
2888 update->backend_data = NULL;
2889 ret = TRANSACTION_GENERIC_ERROR;
2890 goto cleanup;
2891 }
2892 }
2893 if (update->flags & REF_NEEDS_COMMIT) {
2894 clear_loose_ref_cache(refs);
2895 if (commit_ref(lock)) {
2896 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
2897 unlock_ref(lock);
2898 update->backend_data = NULL;
2899 ret = TRANSACTION_GENERIC_ERROR;
2900 goto cleanup;
2901 }
2902 }
2903 }
2904
2905 /*
2906 * Now that updates are safely completed, we can perform
2907 * deletes. First delete the reflogs of any references that
2908 * will be deleted, since (in the unexpected event of an
2909 * error) leaving a reference without a reflog is less bad
2910 * than leaving a reflog without a reference (the latter is a
2911 * mildly invalid repository state):
2912 */
2913 for (i = 0; i < transaction->nr; i++) {
2914 struct ref_update *update = transaction->updates[i];
2915 if (update->flags & REF_DELETING &&
2916 !(update->flags & REF_LOG_ONLY) &&
2917 !(update->flags & REF_IS_PRUNING)) {
2918 strbuf_reset(&sb);
2919 files_reflog_path(refs, &sb, update->refname);
2920 if (!unlink_or_warn(sb.buf))
2921 try_remove_empty_parents(refs, update->refname,
2922 REMOVE_EMPTY_PARENTS_REFLOG);
2923 }
2924 }
2925
2926 /*
2927 * Perform deletes now that updates are safely completed.
2928 *
2929 * First delete any packed versions of the references, while
2930 * retaining the packed-refs lock:
2931 */
2932 if (packed_transaction) {
2933 ret = ref_transaction_commit(packed_transaction, err);
2934 ref_transaction_free(packed_transaction);
2935 packed_transaction = NULL;
2936 backend_data->packed_transaction = NULL;
2937 if (ret)
2938 goto cleanup;
2939 }
2940
2941 /* Now delete the loose versions of the references: */
2942 for (i = 0; i < transaction->nr; i++) {
2943 struct ref_update *update = transaction->updates[i];
2944 struct ref_lock *lock = update->backend_data;
2945
2946 if (update->flags & REF_DELETING &&
2947 !(update->flags & REF_LOG_ONLY)) {
2948 update->flags |= REF_DELETED_RMDIR;
2949 if (!(update->type & REF_ISPACKED) ||
2950 update->type & REF_ISSYMREF) {
2951 /* It is a loose reference. */
2952 strbuf_reset(&sb);
2953 files_ref_path(refs, &sb, lock->ref_name);
2954 if (unlink_or_msg(sb.buf, err)) {
2955 ret = TRANSACTION_GENERIC_ERROR;
2956 goto cleanup;
2957 }
2958 }
2959 }
2960 }
2961
2962 clear_loose_ref_cache(refs);
2963
2964 cleanup:
2965 files_transaction_cleanup(refs, transaction);
2966
2967 for (i = 0; i < transaction->nr; i++) {
2968 struct ref_update *update = transaction->updates[i];
2969
2970 if (update->flags & REF_DELETED_RMDIR) {
2971 /*
2972 * The reference was deleted. Delete any
2973 * empty parent directories. (Note that this
2974 * can only work because we have already
2975 * removed the lockfile.)
2976 */
2977 try_remove_empty_parents(refs, update->refname,
2978 REMOVE_EMPTY_PARENTS_REF);
2979 }
2980 }
2981
2982 strbuf_release(&sb);
2983 return ret;
2984 }
2985
2986 static int files_transaction_abort(struct ref_store *ref_store,
2987 struct ref_transaction *transaction,
2988 struct strbuf *err)
2989 {
2990 struct files_ref_store *refs =
2991 files_downcast(ref_store, 0, "ref_transaction_abort");
2992
2993 files_transaction_cleanup(refs, transaction);
2994 return 0;
2995 }
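
/*
 * Illustrative only: the prepare/finish/abort entry points above are
 * not called directly; callers drive them through the generic
 * transaction API, roughly like this (error checking omitted):
 *
 *	struct strbuf err = STRBUF_INIT;
 *	struct ref_transaction *t = ref_transaction_begin(&err);
 *
 *	ref_transaction_update(t, "refs/heads/topic",
 *			       &new_oid, &old_oid, 0, "msg", &err);
 *	ref_transaction_commit(t, &err);
 *	ref_transaction_free(t);
 *	strbuf_release(&err);
 *
 * ref_transaction_commit() is what ends up calling
 * files_transaction_prepare() and then files_transaction_finish().
 */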
2996
2997 static int ref_present(const char *refname,
2998 const struct object_id *oid, int flags, void *cb_data)
2999 {
3000 struct string_list *affected_refnames = cb_data;
3001
3002 return string_list_has_string(affected_refnames, refname);
3003 }
3004
3005 static int files_initial_transaction_commit(struct ref_store *ref_store,
3006 struct ref_transaction *transaction,
3007 struct strbuf *err)
3008 {
3009 struct files_ref_store *refs =
3010 files_downcast(ref_store, REF_STORE_WRITE,
3011 "initial_ref_transaction_commit");
3012 size_t i;
3013 int ret = 0;
3014 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
3015 struct ref_transaction *packed_transaction = NULL;
3016
3017 assert(err);
3018
3019 if (transaction->state != REF_TRANSACTION_OPEN)
3020 BUG("commit called for transaction that is not open");
3021
3022 /* Fail if a refname appears more than once in the transaction: */
3023 for (i = 0; i < transaction->nr; i++)
3024 string_list_append(&affected_refnames,
3025 transaction->updates[i]->refname);
3026 string_list_sort(&affected_refnames);
3027 if (ref_update_reject_duplicates(&affected_refnames, err)) {
3028 ret = TRANSACTION_GENERIC_ERROR;
3029 goto cleanup;
3030 }
3031
3032 /*
3033 * Calling this function in an active repository or when
3034 * there are existing references is undefined: we are
3035 * only locking and changing packed-refs, so (1) any
3036 * simultaneous processes might try to change a reference at
3037 * the same time we do, and (2) any existing loose versions of
3038 * the references that we are setting would have precedence
3039 * over our values. But some remote helpers create the remote
3040 * "HEAD" and "master" branches before calling this function,
3041 * so here we really only check that none of the references
3042 * that we are creating already exists.
3043 */
3044 if (refs_for_each_rawref(&refs->base, ref_present,
3045 &affected_refnames))
3046 BUG("initial ref transaction called with existing refs");
3047
3048 packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
3049 if (!packed_transaction) {
3050 ret = TRANSACTION_GENERIC_ERROR;
3051 goto cleanup;
3052 }
3053
3054 for (i = 0; i < transaction->nr; i++) {
3055 struct ref_update *update = transaction->updates[i];
3056
3057 if ((update->flags & REF_HAVE_OLD) &&
3058 !is_null_oid(&update->old_oid))
3059 BUG("initial ref transaction with old_sha1 set");
3060 if (refs_verify_refname_available(&refs->base, update->refname,
3061 &affected_refnames, NULL,
3062 err)) {
3063 ret = TRANSACTION_NAME_CONFLICT;
3064 goto cleanup;
3065 }
3066
3067 /*
3068 * Add a reference creation for this reference to the
3069 * packed-refs transaction:
3070 */
3071 ref_transaction_add_update(packed_transaction, update->refname,
3072 update->flags & ~REF_HAVE_OLD,
3073 &update->new_oid, &update->old_oid,
3074 NULL);
3075 }
3076
3077 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
3078 ret = TRANSACTION_GENERIC_ERROR;
3079 goto cleanup;
3080 }
3081
3082 if (initial_ref_transaction_commit(packed_transaction, err)) {
3083 ret = TRANSACTION_GENERIC_ERROR;
3084 }
3085
3086 packed_refs_unlock(refs->packed_ref_store);
3087 cleanup:
3088 if (packed_transaction)
3089 ref_transaction_free(packed_transaction);
3090 transaction->state = REF_TRANSACTION_CLOSED;
3091 string_list_clear(&affected_refnames, 0);
3092 return ret;
3093 }
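
/*
 * Illustrative note: this "initial" path is what e.g. "git clone" uses
 * to write all of the newly fetched refs straight into packed-refs in
 * one transaction, rather than creating one loose ref file per ref.
 */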
3094
3095 struct expire_reflog_cb {
3096 reflog_expiry_should_prune_fn *should_prune_fn;
3097 void *policy_cb;
3098 FILE *newlog;
3099 struct object_id last_kept_oid;
3100 unsigned int rewrite:1,
3101 dry_run:1;
3102 };
3103
3104 static int expire_reflog_ent(struct object_id *ooid, struct object_id *noid,
3105 const char *email, timestamp_t timestamp, int tz,
3106 const char *message, void *cb_data)
3107 {
3108 struct expire_reflog_cb *cb = cb_data;
3109 reflog_expiry_should_prune_fn *fn = cb->should_prune_fn;
3110
3111 if (cb->rewrite)
3112 ooid = &cb->last_kept_oid;
3113
3114 if (fn(ooid, noid, email, timestamp, tz, message, cb->policy_cb))
3115 return 0;
3116
3117 if (cb->dry_run)
3118 return 0; /* --dry-run */
3119
3120 fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s", oid_to_hex(ooid),
3121 oid_to_hex(noid), email, timestamp, tz, message);
3122 oidcpy(&cb->last_kept_oid, noid);
3123
3124 return 0;
3125 }
3126
3127 static int files_reflog_expire(struct ref_store *ref_store,
3128 const char *refname,
3129 unsigned int expire_flags,
3130 reflog_expiry_prepare_fn prepare_fn,
3131 reflog_expiry_should_prune_fn should_prune_fn,
3132 reflog_expiry_cleanup_fn cleanup_fn,
3133 void *policy_cb_data)
3134 {
3135 struct files_ref_store *refs =
3136 files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
3137 struct lock_file reflog_lock = LOCK_INIT;
3138 struct expire_reflog_cb cb;
3139 struct ref_lock *lock;
3140 struct strbuf log_file_sb = STRBUF_INIT;
3141 char *log_file;
3142 int status = 0;
3143 struct strbuf err = STRBUF_INIT;
3144 const struct object_id *oid;
3145
3146 memset(&cb, 0, sizeof(cb));
3147 cb.rewrite = !!(expire_flags & EXPIRE_REFLOGS_REWRITE);
3148 cb.dry_run = !!(expire_flags & EXPIRE_REFLOGS_DRY_RUN);
3149 cb.policy_cb = policy_cb_data;
3150 cb.should_prune_fn = should_prune_fn;
3151
3152 /*
3153 * The reflog file is locked by holding the lock on the
3154 * reference itself, plus we might need to update the
3155 * reference if --updateref was specified:
3156 */
3157 lock = lock_ref_oid_basic(refs, refname, &err);
3158 if (!lock) {
3159 error("cannot lock ref '%s': %s", refname, err.buf);
3160 strbuf_release(&err);
3161 return -1;
3162 }
3163 oid = &lock->old_oid;
3164
3165 /*
3166 * When refs are deleted, their reflog is deleted before the
3167 * ref itself is deleted. This is because there is no separate
3168 * lock for reflog; instead we take a lock on the ref with
3169 * lock_ref_oid_basic().
3170 *
3171 * If a race happens and the reflog doesn't exist after we've
3172 * acquired the lock, that's OK. We've got nothing more to do;
3173 * we were asked to delete the reflog, but someone else
3174 * deleted it! The caller doesn't care who deleted it,
3175 * just that it is deleted. So we can return successfully.
3176 */
3177 if (!refs_reflog_exists(ref_store, refname)) {
3178 unlock_ref(lock);
3179 return 0;
3180 }
3181
3182 files_reflog_path(refs, &log_file_sb, refname);
3183 log_file = strbuf_detach(&log_file_sb, NULL);
3184 if (!cb.dry_run) {
3185 /*
3186 * Even though holding $GIT_DIR/logs/$reflog.lock has
3187 * no locking implications, we use the lock_file
3188 * machinery here anyway because it does a lot of the
3189 * work we need, including cleaning up if the program
3190 * exits unexpectedly.
3191 */
3192 if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) {
3193 struct strbuf err = STRBUF_INIT;
3194 unable_to_lock_message(log_file, errno, &err);
3195 error("%s", err.buf);
3196 strbuf_release(&err);
3197 goto failure;
3198 }
3199 cb.newlog = fdopen_lock_file(&reflog_lock, "w");
3200 if (!cb.newlog) {
3201 error("cannot fdopen %s (%s)",
3202 get_lock_file_path(&reflog_lock), strerror(errno));
3203 goto failure;
3204 }
3205 }
3206
3207 (*prepare_fn)(refname, oid, cb.policy_cb);
3208 refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
3209 (*cleanup_fn)(cb.policy_cb);
3210
3211 if (!cb.dry_run) {
3212 /*
3213 * It doesn't make sense to adjust a reference pointed
3214 * to by a symbolic ref based on expiring entries in
3215 * the symbolic reference's reflog. Nor can we update
3216 * a reference if there are no remaining reflog
3217 * entries.
3218 */
3219 int update = 0;
3220
3221 if ((expire_flags & EXPIRE_REFLOGS_UPDATE_REF) &&
3222 !is_null_oid(&cb.last_kept_oid)) {
3223 int type;
3224 const char *ref;
3225
3226 ref = refs_resolve_ref_unsafe(&refs->base, refname,
3227 RESOLVE_REF_NO_RECURSE,
3228 NULL, &type);
3229 update = !!(ref && !(type & REF_ISSYMREF));
3230 }
3231
3232 if (close_lock_file_gently(&reflog_lock)) {
3233 status |= error("couldn't write %s: %s", log_file,
3234 strerror(errno));
3235 rollback_lock_file(&reflog_lock);
3236 } else if (update &&
3237 (write_in_full(get_lock_file_fd(&lock->lk),
3238 oid_to_hex(&cb.last_kept_oid), the_hash_algo->hexsz) < 0 ||
3239 write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
3240 close_ref_gently(lock) < 0)) {
3241 status |= error("couldn't write %s",
3242 get_lock_file_path(&lock->lk));
3243 rollback_lock_file(&reflog_lock);
3244 } else if (commit_lock_file(&reflog_lock)) {
3245 status |= error("unable to write reflog '%s' (%s)",
3246 log_file, strerror(errno));
3247 } else if (update && commit_ref(lock)) {
3248 status |= error("couldn't set %s", lock->ref_name);
3249 }
3250 }
3251 free(log_file);
3252 unlock_ref(lock);
3253 return status;
3254
3255 failure:
3256 rollback_lock_file(&reflog_lock);
3257 free(log_file);
3258 unlock_ref(lock);
3259 return -1;
3260 }
3261
3262 static int files_init_db(struct ref_store *ref_store, struct strbuf *err)
3263 {
3264 struct files_ref_store *refs =
3265 files_downcast(ref_store, REF_STORE_WRITE, "init_db");
3266 struct strbuf sb = STRBUF_INIT;
3267
3268 /*
3269 * Create .git/refs/{heads,tags}
3270 */
3271 files_ref_path(refs, &sb, "refs/heads");
3272 safe_create_dir(sb.buf, 1);
3273
3274 strbuf_reset(&sb);
3275 files_ref_path(refs, &sb, "refs/tags");
3276 safe_create_dir(sb.buf, 1);
3277
3278 strbuf_release(&sb);
3279 return 0;
3280 }
3281
3282 struct ref_storage_be refs_be_files = {
3283 .next = NULL,
3284 .name = "files",
3285 .init = files_ref_store_create,
3286 .init_db = files_init_db,
3287 .transaction_prepare = files_transaction_prepare,
3288 .transaction_finish = files_transaction_finish,
3289 .transaction_abort = files_transaction_abort,
3290 .initial_transaction_commit = files_initial_transaction_commit,
3291
3292 .pack_refs = files_pack_refs,
3293 .create_symref = files_create_symref,
3294 .delete_refs = files_delete_refs,
3295 .rename_ref = files_rename_ref,
3296 .copy_ref = files_copy_ref,
3297
3298 .iterator_begin = files_ref_iterator_begin,
3299 .read_raw_ref = files_read_raw_ref,
3300 .read_symbolic_ref = files_read_symbolic_ref,
3301
3302 .reflog_iterator_begin = files_reflog_iterator_begin,
3303 .for_each_reflog_ent = files_for_each_reflog_ent,
3304 .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse,
3305 .reflog_exists = files_reflog_exists,
3306 .create_reflog = files_create_reflog,
3307 .delete_reflog = files_delete_reflog,
3308 .reflog_expire = files_reflog_expire
3309 };