thirdparty/git.git: refs/files-backend.c (blob 1adc4b5182f7592a28aa165027a498d139b7e7ca)
1 #define USE_THE_REPOSITORY_VARIABLE
2 #define DISABLE_SIGN_COMPARE_WARNINGS
3
4 #include "../git-compat-util.h"
5 #include "../abspath.h"
6 #include "../config.h"
7 #include "../copy.h"
8 #include "../environment.h"
9 #include "../gettext.h"
10 #include "../hash.h"
11 #include "../hex.h"
12 #include "../fsck.h"
13 #include "../refs.h"
14 #include "../repo-settings.h"
15 #include "refs-internal.h"
16 #include "ref-cache.h"
17 #include "packed-backend.h"
18 #include "../ident.h"
19 #include "../iterator.h"
20 #include "../dir-iterator.h"
21 #include "../lockfile.h"
22 #include "../object.h"
23 #include "../path.h"
24 #include "../dir.h"
25 #include "../chdir-notify.h"
26 #include "../setup.h"
27 #include "../worktree.h"
28 #include "../wrapper.h"
29 #include "../write-or-die.h"
30 #include "../revision.h"
31 #include <wildmatch.h>
32
33 /*
34 * This backend uses the following flags in `ref_update::flags` for
35 * internal bookkeeping purposes. Their numerical values must not
36 * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW,
37 * or REF_HAVE_OLD, which are also stored in `ref_update::flags`.
38 */
39
40 /*
41 * Used as a flag in ref_update::flags when a loose ref is being
42 * pruned. This flag must only be used when REF_NO_DEREF is set.
43 */
44 #define REF_IS_PRUNING (1 << 4)
45
46 /*
47  * Flag telling the ref-locking code to tolerate broken
48 * refs (i.e., because the reference is about to be deleted anyway).
49 */
50 #define REF_DELETING (1 << 5)
51
52 /*
53 * Used as a flag in ref_update::flags when the lockfile needs to be
54 * committed.
55 */
56 #define REF_NEEDS_COMMIT (1 << 6)
57
58 /*
59 * Used as a flag in ref_update::flags when the ref_update was via an
60 * update to HEAD.
61 */
62 #define REF_UPDATE_VIA_HEAD (1 << 8)
63
64 /*
65 * Used as a flag in ref_update::flags when a reference has been
66 * deleted and the ref's parent directories may need cleanup.
67 */
68 #define REF_DELETED_RMDIR (1 << 9)
69
70 /*
71 * Used to indicate that the reflog-only update has been created via
72 * `split_head_update()`.
73 */
74 #define REF_LOG_VIA_SPLIT (1 << 14)
75
76 struct ref_lock {
77 char *ref_name;
78 struct lock_file lk;
79 struct object_id old_oid;
80 unsigned int count; /* track users of the lock (ref update + reflog updates) */
81 };
82
83 struct files_ref_store {
84 struct ref_store base;
85 unsigned int store_flags;
86
87 char *gitcommondir;
88 enum log_refs_config log_all_ref_updates;
89 int prefer_symlink_refs;
90
91 struct ref_cache *loose;
92
93 struct ref_store *packed_ref_store;
94 };
95
96 static void clear_loose_ref_cache(struct files_ref_store *refs)
97 {
98 if (refs->loose) {
99 free_ref_cache(refs->loose);
100 refs->loose = NULL;
101 }
102 }
103
104 /*
105  * Create a new "files" ref store for the given repository and
106  * gitdir.
107 */
108 static struct ref_store *files_ref_store_init(struct repository *repo,
109 const char *gitdir,
110 unsigned int flags)
111 {
112 struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
113 struct ref_store *ref_store = (struct ref_store *)refs;
114 struct strbuf sb = STRBUF_INIT;
115
116 base_ref_store_init(ref_store, repo, gitdir, &refs_be_files);
117 refs->store_flags = flags;
118 get_common_dir_noenv(&sb, gitdir);
119 refs->gitcommondir = strbuf_detach(&sb, NULL);
120 refs->packed_ref_store =
121 packed_ref_store_init(repo, refs->gitcommondir, flags);
122 refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
123 repo_config_get_bool(repo, "core.prefersymlinkrefs", &refs->prefer_symlink_refs);
124
125 chdir_notify_reparent("files-backend $GIT_DIR", &refs->base.gitdir);
126 chdir_notify_reparent("files-backend $GIT_COMMONDIR",
127 &refs->gitcommondir);
128
129 return ref_store;
130 }
131
132 /*
133 * Die if refs is not the main ref store. caller is used in any
134 * necessary error messages.
135 */
136 static void files_assert_main_repository(struct files_ref_store *refs,
137 const char *caller)
138 {
139 if (refs->store_flags & REF_STORE_MAIN)
140 return;
141
142 BUG("operation %s only allowed for main ref store", caller);
143 }
144
145 /*
146 * Downcast ref_store to files_ref_store. Die if ref_store is not a
147 * files_ref_store. required_flags is compared with ref_store's
148 * store_flags to ensure the ref_store has all required capabilities.
149 * "caller" is used in any necessary error messages.
150 */
151 static struct files_ref_store *files_downcast(struct ref_store *ref_store,
152 unsigned int required_flags,
153 const char *caller)
154 {
155 struct files_ref_store *refs;
156
157 if (ref_store->be != &refs_be_files)
158 BUG("ref_store is type \"%s\" not \"files\" in %s",
159 ref_store->be->name, caller);
160
161 refs = (struct files_ref_store *)ref_store;
162
163 if ((refs->store_flags & required_flags) != required_flags)
164 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
165 caller, required_flags, refs->store_flags);
166
167 return refs;
168 }
169
170 static void files_ref_store_release(struct ref_store *ref_store)
171 {
172 struct files_ref_store *refs = files_downcast(ref_store, 0, "release");
173 free_ref_cache(refs->loose);
174 free(refs->gitcommondir);
175 ref_store_release(refs->packed_ref_store);
176 free(refs->packed_ref_store);
177 }
178
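/*
 * Compute the on-disk path of the reflog for refname: per-worktree refs
 * (e.g. "HEAD") live under "$GIT_DIR/logs/", shared refs (e.g.
 * "refs/heads/main") under "$GIT_COMMONDIR/logs/", and refs belonging to
 * another worktree ("worktrees/<name>/HEAD") under
 * "$GIT_COMMONDIR/worktrees/<name>/logs/". files_ref_path() below applies
 * the same mapping for the loose ref file itself.
 */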
179 static void files_reflog_path(struct files_ref_store *refs,
180 struct strbuf *sb,
181 const char *refname)
182 {
183 const char *bare_refname;
184 const char *wtname;
185 int wtname_len;
186 enum ref_worktree_type wt_type = parse_worktree_ref(
187 refname, &wtname, &wtname_len, &bare_refname);
188
189 switch (wt_type) {
190 case REF_WORKTREE_CURRENT:
191 strbuf_addf(sb, "%s/logs/%s", refs->base.gitdir, refname);
192 break;
193 case REF_WORKTREE_SHARED:
194 case REF_WORKTREE_MAIN:
195 strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, bare_refname);
196 break;
197 case REF_WORKTREE_OTHER:
198 strbuf_addf(sb, "%s/worktrees/%.*s/logs/%s", refs->gitcommondir,
199 wtname_len, wtname, bare_refname);
200 break;
201 default:
202 BUG("unknown ref type %d of ref %s", wt_type, refname);
203 }
204 }
205
206 static void files_ref_path(struct files_ref_store *refs,
207 struct strbuf *sb,
208 const char *refname)
209 {
210 const char *bare_refname;
211 const char *wtname;
212 int wtname_len;
213 enum ref_worktree_type wt_type = parse_worktree_ref(
214 refname, &wtname, &wtname_len, &bare_refname);
215 switch (wt_type) {
216 case REF_WORKTREE_CURRENT:
217 strbuf_addf(sb, "%s/%s", refs->base.gitdir, refname);
218 break;
219 case REF_WORKTREE_OTHER:
220 strbuf_addf(sb, "%s/worktrees/%.*s/%s", refs->gitcommondir,
221 wtname_len, wtname, bare_refname);
222 break;
223 case REF_WORKTREE_SHARED:
224 case REF_WORKTREE_MAIN:
225 strbuf_addf(sb, "%s/%s", refs->gitcommondir, bare_refname);
226 break;
227 default:
228 BUG("unknown ref type %d of ref %s", wt_type, refname);
229 }
230 }
231
232 /*
233 * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being
234 * per-worktree, might not appear in the directory listing for
235 * refs/ in the main repo.
236 */
237 static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname)
238 {
239 const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" };
240 int ip;
241
242 if (strcmp(dirname, "refs/"))
243 return;
244
245 for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) {
246 const char *prefix = prefixes[ip];
247 int prefix_len = strlen(prefix);
248 struct ref_entry *child_entry;
249 int pos;
250
251 pos = search_ref_dir(dir, prefix, prefix_len);
252 if (pos >= 0)
253 continue;
254 child_entry = create_dir_entry(dir->cache, prefix, prefix_len);
255 add_entry_to_dir(dir, child_entry);
256 }
257 }
258
259 static void loose_fill_ref_dir_regular_file(struct files_ref_store *refs,
260 const char *refname,
261 struct ref_dir *dir)
262 {
263 struct object_id oid;
264 int flag;
265 const char *referent = refs_resolve_ref_unsafe(&refs->base,
266 refname,
267 RESOLVE_REF_READING,
268 &oid, &flag);
269
270 if (!referent) {
271 oidclr(&oid, refs->base.repo->hash_algo);
272 flag |= REF_ISBROKEN;
273 } else if (is_null_oid(&oid)) {
274 /*
275 * It is so astronomically unlikely
276 * that null_oid is the OID of an
277 * actual object that we consider its
278 * appearance in a loose reference
279 * file to be repo corruption
280 * (probably due to a software bug).
281 */
282 flag |= REF_ISBROKEN;
283 }
284
285 if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
286 if (!refname_is_safe(refname))
287 die("loose refname is dangerous: %s", refname);
288 oidclr(&oid, refs->base.repo->hash_algo);
289 flag |= REF_BAD_NAME | REF_ISBROKEN;
290 }
291
292 if (!(flag & REF_ISSYMREF))
293 referent = NULL;
294
295 add_entry_to_dir(dir, create_ref_entry(refname, referent, &oid, flag));
296 }
297
298 /*
299 * Read the loose references from the namespace dirname into dir
300 * (without recursing). dirname must end with '/'. dir must be the
301 * directory entry corresponding to dirname.
302 */
303 static void loose_fill_ref_dir(struct ref_store *ref_store,
304 struct ref_dir *dir, const char *dirname)
305 {
306 struct files_ref_store *refs =
307 files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir");
308 DIR *d;
309 struct dirent *de;
310 int dirnamelen = strlen(dirname);
311 struct strbuf refname;
312 struct strbuf path = STRBUF_INIT;
313
314 files_ref_path(refs, &path, dirname);
315
316 d = opendir(path.buf);
317 if (!d) {
318 strbuf_release(&path);
319 return;
320 }
321
322 strbuf_init(&refname, dirnamelen + 257);
323 strbuf_add(&refname, dirname, dirnamelen);
324
325 while ((de = readdir(d)) != NULL) {
326 unsigned char dtype;
327
328 if (de->d_name[0] == '.')
329 continue;
330 if (ends_with(de->d_name, ".lock"))
331 continue;
332 strbuf_addstr(&refname, de->d_name);
333
334 dtype = get_dtype(de, &path, 1);
335 if (dtype == DT_DIR) {
336 strbuf_addch(&refname, '/');
337 add_entry_to_dir(dir,
338 create_dir_entry(dir->cache, refname.buf,
339 refname.len));
340 } else if (dtype == DT_REG) {
341 loose_fill_ref_dir_regular_file(refs, refname.buf, dir);
342 }
343 strbuf_setlen(&refname, dirnamelen);
344 }
345 strbuf_release(&refname);
346 strbuf_release(&path);
347 closedir(d);
348
349 add_per_worktree_entries_to_dir(dir, dirname);
350 }
351
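/*
 * Iterate over the regular files at the top level of the ref store and
 * invoke cb for each one whose name qualifies as a root ref (such as
 * "HEAD"), stopping early if cb returns a non-zero value.
 */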
352 static int for_each_root_ref(struct files_ref_store *refs,
353 int (*cb)(const char *refname, void *cb_data),
354 void *cb_data)
355 {
356 struct strbuf path = STRBUF_INIT, refname = STRBUF_INIT;
357 const char *dirname = refs->loose->root->name;
358 struct dirent *de;
359 size_t dirnamelen;
360 int ret;
361 DIR *d;
362
363 files_ref_path(refs, &path, dirname);
364
365 d = opendir(path.buf);
366 if (!d) {
367 strbuf_release(&path);
368 return -1;
369 }
370
371 strbuf_addstr(&refname, dirname);
372 dirnamelen = refname.len;
373
374 while ((de = readdir(d)) != NULL) {
375 unsigned char dtype;
376
377 if (de->d_name[0] == '.')
378 continue;
379 if (ends_with(de->d_name, ".lock"))
380 continue;
381 strbuf_addstr(&refname, de->d_name);
382
383 dtype = get_dtype(de, &path, 1);
384 if (dtype == DT_REG && is_root_ref(de->d_name)) {
385 ret = cb(refname.buf, cb_data);
386 if (ret)
387 goto done;
388 }
389
390 strbuf_setlen(&refname, dirnamelen);
391 }
392
393 ret = 0;
394
395 done:
396 strbuf_release(&refname);
397 strbuf_release(&path);
398 closedir(d);
399 return ret;
400 }
401
402 struct fill_root_ref_data {
403 struct files_ref_store *refs;
404 struct ref_dir *dir;
405 };
406
407 static int fill_root_ref(const char *refname, void *cb_data)
408 {
409 struct fill_root_ref_data *data = cb_data;
410 loose_fill_ref_dir_regular_file(data->refs, refname, data->dir);
411 return 0;
412 }
413
414 /*
415  * Add root refs to the ref dir by scanning the directory for any files that
416  * follow the root ref syntax.
417 */
418 static void add_root_refs(struct files_ref_store *refs,
419 struct ref_dir *dir)
420 {
421 struct fill_root_ref_data data = {
422 .refs = refs,
423 .dir = dir,
424 };
425
426 for_each_root_ref(refs, fill_root_ref, &data);
427 }
428
429 static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs,
430 unsigned int flags)
431 {
432 if (!refs->loose) {
433 struct ref_dir *dir;
434
435 /*
436 * Mark the top-level directory complete because we
437 * are about to read the only subdirectory that can
438 * hold references:
439 */
440 refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir);
441
442 /* We're going to fill the top level ourselves: */
443 refs->loose->root->flag &= ~REF_INCOMPLETE;
444
445 dir = get_ref_dir(refs->loose->root);
446
447 if (flags & DO_FOR_EACH_INCLUDE_ROOT_REFS)
448 add_root_refs(refs, dir);
449
450 /*
451 * Add an incomplete entry for "refs/" (to be filled
452 * lazily):
453 */
454 add_entry_to_dir(dir, create_dir_entry(refs->loose, "refs/", 5));
455 }
456 return refs->loose;
457 }
458
459 static int read_ref_internal(struct ref_store *ref_store, const char *refname,
460 struct object_id *oid, struct strbuf *referent,
461 unsigned int *type, int *failure_errno, int skip_packed_refs)
462 {
463 struct files_ref_store *refs =
464 files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
465 struct strbuf sb_contents = STRBUF_INIT;
466 struct strbuf sb_path = STRBUF_INIT;
467 const char *path;
468 const char *buf;
469 struct stat st;
470 int fd;
471 int ret = -1;
472 int remaining_retries = 3;
473 int myerr = 0;
474
475 *type = 0;
476 strbuf_reset(&sb_path);
477
478 files_ref_path(refs, &sb_path, refname);
479
480 path = sb_path.buf;
481
482 stat_ref:
483 /*
484 * We might have to loop back here to avoid a race
485 * condition: first we lstat() the file, then we try
486 * to read it as a link or as a file. But if somebody
487 * changes the type of the file (file <-> directory
488 * <-> symlink) between the lstat() and reading, then
489 * we don't want to report that as an error but rather
490 * try again starting with the lstat().
491 *
492 * We'll keep a count of the retries, though, just to avoid
493 * any confusing situation sending us into an infinite loop.
494 */
495
496 if (remaining_retries-- <= 0)
497 goto out;
498
499 if (lstat(path, &st) < 0) {
500 int ignore_errno;
501 myerr = errno;
502 if (myerr != ENOENT || skip_packed_refs)
503 goto out;
504 if (refs_read_raw_ref(refs->packed_ref_store, refname, oid,
505 referent, type, &ignore_errno)) {
506 myerr = ENOENT;
507 goto out;
508 }
509 ret = 0;
510 goto out;
511 }
512
513         /* Follow "normalized" symlinks, i.e. those whose target starts with "refs/", by hand */
514 if (S_ISLNK(st.st_mode)) {
515 strbuf_reset(&sb_contents);
516 if (strbuf_readlink(&sb_contents, path, st.st_size) < 0) {
517 myerr = errno;
518 if (myerr == ENOENT || myerr == EINVAL)
519 /* inconsistent with lstat; retry */
520 goto stat_ref;
521 else
522 goto out;
523 }
524 if (starts_with(sb_contents.buf, "refs/") &&
525 !check_refname_format(sb_contents.buf, 0)) {
526 strbuf_swap(&sb_contents, referent);
527 *type |= REF_ISSYMREF;
528 ret = 0;
529 goto out;
530 }
531 /*
532 * It doesn't look like a refname; fall through to just
533 * treating it like a non-symlink, and reading whatever it
534 * points to.
535 */
536 }
537
538 /* Is it a directory? */
539 if (S_ISDIR(st.st_mode)) {
540 int ignore_errno;
541 /*
542 * Even though there is a directory where the loose
543 * ref is supposed to be, there could still be a
544 * packed ref:
545 */
546 if (skip_packed_refs ||
547 refs_read_raw_ref(refs->packed_ref_store, refname, oid,
548 referent, type, &ignore_errno)) {
549 myerr = EISDIR;
550 goto out;
551 }
552 ret = 0;
553 goto out;
554 }
555
556 /*
557 * Anything else, just open it and try to use it as
558 * a ref
559 */
560 fd = open(path, O_RDONLY);
561 if (fd < 0) {
562 myerr = errno;
563 if (myerr == ENOENT && !S_ISLNK(st.st_mode))
564 /* inconsistent with lstat; retry */
565 goto stat_ref;
566 else
567 goto out;
568 }
569 strbuf_reset(&sb_contents);
570 if (strbuf_read(&sb_contents, fd, 256) < 0) {
571 myerr = errno;
572 close(fd);
573 goto out;
574 }
575 close(fd);
576 strbuf_rtrim(&sb_contents);
577 buf = sb_contents.buf;
578
579 ret = parse_loose_ref_contents(ref_store->repo->hash_algo, buf,
580 oid, referent, type, NULL, &myerr);
581
582 out:
583 if (ret && !myerr)
584 BUG("returning non-zero %d, should have set myerr!", ret);
585 *failure_errno = myerr;
586
587 strbuf_release(&sb_path);
588 strbuf_release(&sb_contents);
589 errno = 0;
590 return ret;
591 }
592
593 static int files_read_raw_ref(struct ref_store *ref_store, const char *refname,
594 struct object_id *oid, struct strbuf *referent,
595 unsigned int *type, int *failure_errno)
596 {
597 return read_ref_internal(ref_store, refname, oid, referent, type, failure_errno, 0);
598 }
599
600 static int files_read_symbolic_ref(struct ref_store *ref_store, const char *refname,
601 struct strbuf *referent)
602 {
603 struct object_id oid;
604 int failure_errno, ret;
605 unsigned int type;
606
607 ret = read_ref_internal(ref_store, refname, &oid, referent, &type, &failure_errno, 1);
608 if (!ret && !(type & REF_ISSYMREF))
609 return NOT_A_SYMREF;
610 return ret;
611 }
612
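/*
 * Parse the contents of a loose ref file: either a symref of the form
 * "ref: <refname>", or a hex object ID optionally followed by whitespace
 * and trailing data (as in FETCH_HEAD). On a malformed value, set
 * REF_ISBROKEN in *type, set *failure_errno to EINVAL and return -1.
 */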
613 int parse_loose_ref_contents(const struct git_hash_algo *algop,
614 const char *buf, struct object_id *oid,
615 struct strbuf *referent, unsigned int *type,
616 const char **trailing, int *failure_errno)
617 {
618 const char *p;
619 if (skip_prefix(buf, "ref:", &buf)) {
620 while (isspace(*buf))
621 buf++;
622
623 strbuf_reset(referent);
624 strbuf_addstr(referent, buf);
625 *type |= REF_ISSYMREF;
626 return 0;
627 }
628
629 /*
630 * FETCH_HEAD has additional data after the sha.
631 */
632 if (parse_oid_hex_algop(buf, oid, &p, algop) ||
633 (*p != '\0' && !isspace(*p))) {
634 *type |= REF_ISBROKEN;
635 *failure_errno = EINVAL;
636 return -1;
637 }
638
639 if (trailing)
640 *trailing = p;
641
642 return 0;
643 }
644
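/*
 * Drop one user of the lock; once the last user is gone, roll back the
 * lockfile and free the lock itself.
 */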
645 static void unlock_ref(struct ref_lock *lock)
646 {
647 lock->count--;
648 if (!lock->count) {
649 rollback_lock_file(&lock->lk);
650 free(lock->ref_name);
651 free(lock);
652 }
653 }
654
655 /*
656 * Check if the transaction has another update with a case-insensitive refname
657 * match.
658 *
659  * We only check updates that precede the given update in the transaction;
660  * later updates are expected to call this function themselves, so each pair is checked once.
661 */
662 static bool transaction_has_case_conflicting_update(struct ref_transaction *transaction,
663 struct ref_update *update)
664 {
665 for (size_t i = 0; i < transaction->nr; i++) {
666 if (transaction->updates[i] == update)
667 break;
668
669 if (!strcasecmp(transaction->updates[i]->refname, update->refname))
670 return true;
671 }
672 return false;
673 }
674
675 /*
676 * Lock refname, without following symrefs, and set *lock_p to point
677 * at a newly-allocated lock object. Fill in lock->old_oid, referent,
678 * and type similarly to read_raw_ref().
679 *
680 * The caller must verify that refname is a "safe" reference name (in
681 * the sense of refname_is_safe()) before calling this function.
682 *
683 * If the reference doesn't already exist, verify that refname doesn't
684 * have a D/F conflict with any existing references. extras and skip
685 * are passed to refs_verify_refname_available() for this check.
686 *
687 * If mustexist is not set and the reference is not found or is
688 * broken, lock the reference anyway but clear old_oid.
689 *
690 * Return 0 on success. On failure, write an error message to err and
691 * return REF_TRANSACTION_ERROR_NAME_CONFLICT or REF_TRANSACTION_ERROR_GENERIC.
692 *
693 * Implementation note: This function is basically
694 *
695 * lock reference
696 * read_raw_ref()
697 *
698 * but it includes a lot more code to
699 * - Deal with possible races with other processes
700 * - Avoid calling refs_verify_refname_available() when it can be
701 * avoided, namely if we were successfully able to read the ref
702 * - Generate informative error messages in the case of failure
703 */
704 static enum ref_transaction_error lock_raw_ref(struct files_ref_store *refs,
705 struct ref_transaction *transaction,
706 size_t update_idx,
707 int mustexist,
708 struct string_list *refnames_to_check,
709 struct ref_lock **lock_p,
710 struct strbuf *referent,
711 struct strbuf *err)
712 {
713 enum ref_transaction_error ret = REF_TRANSACTION_ERROR_GENERIC;
714 struct ref_update *update = transaction->updates[update_idx];
715 const struct string_list *extras = &transaction->refnames;
716 const char *refname = update->refname;
717 unsigned int *type = &update->type;
718 struct ref_lock *lock;
719 struct strbuf ref_file = STRBUF_INIT;
720 int attempts_remaining = 3;
721 int failure_errno;
722
723 assert(err);
724 files_assert_main_repository(refs, "lock_raw_ref");
725
726 *type = 0;
727
728 /* First lock the file so it can't change out from under us. */
729
730 *lock_p = CALLOC_ARRAY(lock, 1);
731
732 lock->ref_name = xstrdup(refname);
733 lock->count = 1;
734 files_ref_path(refs, &ref_file, refname);
735
736 retry:
737 switch (safe_create_leading_directories(the_repository, ref_file.buf)) {
738 case SCLD_OK:
739 break; /* success */
740 case SCLD_EXISTS:
741 /*
742 * Suppose refname is "refs/foo/bar". We just failed
743 * to create the containing directory, "refs/foo",
744 * because there was a non-directory in the way. This
745 * indicates a D/F conflict, probably because of
746 * another reference such as "refs/foo". There is no
747 * reason to expect this error to be transitory.
748 */
749 if (refs_verify_refname_available(&refs->base, refname,
750 extras, NULL, 0, err)) {
751 if (mustexist) {
752 /*
753 * To the user the relevant error is
754 * that the "mustexist" reference is
755 * missing:
756 */
757 strbuf_reset(err);
758 strbuf_addf(err, "unable to resolve reference '%s'",
759 refname);
760 ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF;
761 } else {
762 /*
763 * The error message set by
764 * refs_verify_refname_available() is
765 * OK.
766 */
767 ret = REF_TRANSACTION_ERROR_NAME_CONFLICT;
768 }
769 } else {
770 /*
771 * The file that is in the way isn't a loose
772 * reference. Report it as a low-level
773 * failure.
774 */
775 strbuf_addf(err, "unable to create lock file %s.lock; "
776 "non-directory in the way",
777 ref_file.buf);
778 }
779 goto error_return;
780 case SCLD_VANISHED:
781 /* Maybe another process was tidying up. Try again. */
782 if (--attempts_remaining > 0)
783 goto retry;
784 /* fall through */
785 default:
786 strbuf_addf(err, "unable to create directory for %s",
787 ref_file.buf);
788 goto error_return;
789 }
790
791 if (hold_lock_file_for_update_timeout(
792 &lock->lk, ref_file.buf, LOCK_NO_DEREF,
793 get_files_ref_lock_timeout_ms()) < 0) {
794 int myerr = errno;
795 errno = 0;
796 if (myerr == ENOENT && --attempts_remaining > 0) {
797 /*
798 * Maybe somebody just deleted one of the
799 * directories leading to ref_file. Try
800 * again:
801 */
802 goto retry;
803 } else {
804 unable_to_lock_message(ref_file.buf, myerr, err);
805 if (myerr == EEXIST) {
806 if (ignore_case &&
807 transaction_has_case_conflicting_update(transaction, update)) {
808 /*
809 * In case-insensitive filesystems, ensure that conflicts within a
810 * given transaction are handled. Pre-existing refs on a
811 * case-insensitive system will be overridden without any issue.
812 */
813 ret = REF_TRANSACTION_ERROR_CASE_CONFLICT;
814 } else {
815 /*
816 * Pre-existing case-conflicting reference locks should also be
817 * specially categorized to avoid failing all batched updates.
818 */
819 ret = REF_TRANSACTION_ERROR_CREATE_EXISTS;
820 }
821 }
822
823 goto error_return;
824 }
825 }
826
827 /*
828 * Now we hold the lock and can read the reference without
829 * fear that its value will change.
830 */
831
832 if (files_read_raw_ref(&refs->base, refname, &lock->old_oid, referent,
833 type, &failure_errno)) {
834 struct string_list_item *item;
835
836 if (failure_errno == ENOENT) {
837 if (mustexist) {
838 /* Garden variety missing reference. */
839 strbuf_addf(err, "unable to resolve reference '%s'",
840 refname);
841 ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF;
842 goto error_return;
843 } else {
844 /*
845 * Reference is missing, but that's OK. We
846 * know that there is not a conflict with
847 * another loose reference because
848 * (supposing that we are trying to lock
849 * reference "refs/foo/bar"):
850 *
851 * - We were successfully able to create
852 * the lockfile refs/foo/bar.lock, so we
853 * know there cannot be a loose reference
854 * named "refs/foo".
855 *
856 * - We got ENOENT and not EISDIR, so we
857 * know that there cannot be a loose
858 * reference named "refs/foo/bar/baz".
859 */
860 }
861 } else if (failure_errno == EISDIR) {
862 /*
863 * There is a directory in the way. It might have
864 * contained references that have been deleted. If
865 * we don't require that the reference already
866 * exists, try to remove the directory so that it
867 * doesn't cause trouble when we want to rename the
868 * lockfile into place later.
869 */
870 if (mustexist) {
871 /* Garden variety missing reference. */
872 strbuf_addf(err, "unable to resolve reference '%s'",
873 refname);
874 ret = REF_TRANSACTION_ERROR_NONEXISTENT_REF;
875 goto error_return;
876 } else if (remove_dir_recursively(&ref_file,
877 REMOVE_DIR_EMPTY_ONLY)) {
878 ret = REF_TRANSACTION_ERROR_NAME_CONFLICT;
879 if (refs_verify_refname_available(
880 &refs->base, refname,
881 extras, NULL, 0, err)) {
882 /*
883 * The error message set by
884 * verify_refname_available() is OK.
885 */
886 goto error_return;
887 } else {
888 /*
889 * Directory conflicts can occur if there
890 * is an existing lock file in the directory
891 * or if the filesystem is case-insensitive
892 * and the directory contains a valid reference
893 * but conflicts with the update.
894 */
895 strbuf_addf(err, "there is a non-empty directory '%s' "
896 "blocking reference '%s'",
897 ref_file.buf, refname);
898 goto error_return;
899 }
900 }
901 } else if (failure_errno == EINVAL && (*type & REF_ISBROKEN)) {
902 strbuf_addf(err, "unable to resolve reference '%s': "
903 "reference broken", refname);
904 goto error_return;
905 } else {
906 strbuf_addf(err, "unable to resolve reference '%s': %s",
907 refname, strerror(failure_errno));
908 goto error_return;
909 }
910
911 /*
912 * If the ref did not exist and we are creating it, we have to
913 * make sure there is no existing packed ref that conflicts
914 * with refname. This check is deferred so that we can batch it.
915 *
916 * For case-insensitive filesystems, we should also check for F/D
917 * conflicts between 'foo' and 'Foo/bar'. So let's lowercase
918 * the refname.
919 */
920 if (ignore_case) {
921 struct strbuf lower = STRBUF_INIT;
922
923 strbuf_addstr(&lower, refname);
924 strbuf_tolower(&lower);
925
926 item = string_list_append_nodup(refnames_to_check,
927 strbuf_detach(&lower, NULL));
928 } else {
929 item = string_list_append(refnames_to_check, refname);
930 }
931
932 item->util = xmalloc(sizeof(update_idx));
933 memcpy(item->util, &update_idx, sizeof(update_idx));
934 }
935
936 ret = 0;
937 goto out;
938
939 error_return:
940 unlock_ref(lock);
941 *lock_p = NULL;
942
943 out:
944 strbuf_release(&ref_file);
945 return ret;
946 }
947
948 struct files_ref_iterator {
949 struct ref_iterator base;
950
951 struct ref_iterator *iter0;
952 struct repository *repo;
953 unsigned int flags;
954 };
955
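/*
 * Advance the underlying overlay iterator, skipping entries the caller
 * asked to filter out: refs outside the current worktree when
 * DO_FOR_EACH_PER_WORKTREE_ONLY is set, dangling symrefs when
 * DO_FOR_EACH_OMIT_DANGLING_SYMREFS is set, and refs that do not resolve
 * to an object unless DO_FOR_EACH_INCLUDE_BROKEN is set.
 */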
956 static int files_ref_iterator_advance(struct ref_iterator *ref_iterator)
957 {
958 struct files_ref_iterator *iter =
959 (struct files_ref_iterator *)ref_iterator;
960 int ok;
961
962 while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
963 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
964 parse_worktree_ref(iter->iter0->refname, NULL, NULL,
965 NULL) != REF_WORKTREE_CURRENT)
966 continue;
967
968 if ((iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS) &&
969 (iter->iter0->flags & REF_ISSYMREF) &&
970 (iter->iter0->flags & REF_ISBROKEN))
971 continue;
972
973 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
974 !ref_resolves_to_object(iter->iter0->refname,
975 iter->repo,
976 iter->iter0->oid,
977 iter->iter0->flags))
978 continue;
979
980 iter->base.refname = iter->iter0->refname;
981 iter->base.oid = iter->iter0->oid;
982 iter->base.flags = iter->iter0->flags;
983 iter->base.referent = iter->iter0->referent;
984
985 return ITER_OK;
986 }
987
988 return ok;
989 }
990
991 static int files_ref_iterator_seek(struct ref_iterator *ref_iterator,
992 const char *refname, unsigned int flags)
993 {
994 struct files_ref_iterator *iter =
995 (struct files_ref_iterator *)ref_iterator;
996 return ref_iterator_seek(iter->iter0, refname, flags);
997 }
998
999 static int files_ref_iterator_peel(struct ref_iterator *ref_iterator,
1000 struct object_id *peeled)
1001 {
1002 struct files_ref_iterator *iter =
1003 (struct files_ref_iterator *)ref_iterator;
1004
1005 return ref_iterator_peel(iter->iter0, peeled);
1006 }
1007
1008 static void files_ref_iterator_release(struct ref_iterator *ref_iterator)
1009 {
1010 struct files_ref_iterator *iter =
1011 (struct files_ref_iterator *)ref_iterator;
1012 ref_iterator_free(iter->iter0);
1013 }
1014
1015 static struct ref_iterator_vtable files_ref_iterator_vtable = {
1016 .advance = files_ref_iterator_advance,
1017 .seek = files_ref_iterator_seek,
1018 .peel = files_ref_iterator_peel,
1019 .release = files_ref_iterator_release,
1020 };
1021
1022 static struct ref_iterator *files_ref_iterator_begin(
1023 struct ref_store *ref_store,
1024 const char *prefix, const char **exclude_patterns,
1025 unsigned int flags)
1026 {
1027 struct files_ref_store *refs;
1028 struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
1029 struct files_ref_iterator *iter;
1030 struct ref_iterator *ref_iterator;
1031 unsigned int required_flags = REF_STORE_READ;
1032
1033 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
1034 required_flags |= REF_STORE_ODB;
1035
1036 refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
1037
1038 /*
1039 * We must make sure that all loose refs are read before
1040 * accessing the packed-refs file; this avoids a race
1041 * condition if loose refs are migrated to the packed-refs
1042 * file by a simultaneous process, but our in-memory view is
1043 * from before the migration. We ensure this as follows:
1044 * First, we call start the loose refs iteration with its
1045 * `prime_ref` argument set to true. This causes the loose
1046 * references in the subtree to be pre-read into the cache.
1047 * (If they've already been read, that's OK; we only need to
1048 * guarantee that they're read before the packed refs, not
1049 * *how much* before.) After that, we call
1050 * packed_ref_iterator_begin(), which internally checks
1051 * whether the packed-ref cache is up to date with what is on
1052 * disk, and re-reads it if not.
1053 */
1054
1055 loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, flags),
1056 prefix, ref_store->repo, 1);
1057
1058 /*
1059 * The packed-refs file might contain broken references, for
1060 * example an old version of a reference that points at an
1061 * object that has since been garbage-collected. This is OK as
1062 * long as there is a corresponding loose reference that
1063 * overrides it, and we don't want to emit an error message in
1064 * this case. So ask the packed_ref_store for all of its
1065 * references, and (if needed) do our own check for broken
1066 * ones in files_ref_iterator_advance(), after we have merged
1067 * the packed and loose references.
1068 */
1069 packed_iter = refs_ref_iterator_begin(
1070 refs->packed_ref_store, prefix, exclude_patterns, 0,
1071 DO_FOR_EACH_INCLUDE_BROKEN);
1072
1073 overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
1074
1075 CALLOC_ARRAY(iter, 1);
1076 ref_iterator = &iter->base;
1077 base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
1078 iter->iter0 = overlay_iter;
1079 iter->repo = ref_store->repo;
1080 iter->flags = flags;
1081
1082 return ref_iterator;
1083 }
1084
1085 /*
1086 * Callback function for raceproof_create_file(). This function is
1087 * expected to do something that makes dirname(path) permanent despite
1088 * the fact that other processes might be cleaning up empty
1089 * directories at the same time. Usually it will create a file named
1090 * path, but alternatively it could create another file in that
1091 * directory, or even chdir() into that directory. The function should
1092 * return 0 if the action was completed successfully. On error, it
1093 * should return a nonzero result and set errno.
1094 * raceproof_create_file() treats two errno values specially:
1095 *
1096 * - ENOENT -- dirname(path) does not exist. In this case,
1097 * raceproof_create_file() tries creating dirname(path)
1098 * (and any parent directories, if necessary) and calls
1099 * the function again.
1100 *
1101 * - EISDIR -- the file already exists and is a directory. In this
1102 * case, raceproof_create_file() removes the directory if
1103 * it is empty (and recursively any empty directories that
1104 * it contains) and calls the function again.
1105 *
1106 * Any other errno causes raceproof_create_file() to fail with the
1107 * callback's return value and errno.
1108 *
1109 * Obviously, this function should be OK with being called again if it
1110 * fails with ENOENT or EISDIR. In other scenarios it will not be
1111 * called again.
1112 */
1113 typedef int create_file_fn(const char *path, void *cb);
1114
1115 /*
1116 * Create a file in dirname(path) by calling fn, creating leading
1117 * directories if necessary. Retry a few times in case we are racing
1118 * with another process that is trying to clean up the directory that
1119 * contains path. See the documentation for create_file_fn for more
1120 * details.
1121 *
1122 * Return the value and set the errno that resulted from the most
1123 * recent call of fn. fn is always called at least once, and will be
1124 * called more than once if it returns ENOENT or EISDIR.
1125 */
1126 static int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
1127 {
1128 /*
1129 * The number of times we will try to remove empty directories
1130 * in the way of path. This is only 1 because if another
1131 * process is racily creating directories that conflict with
1132 * us, we don't want to fight against them.
1133 */
1134 int remove_directories_remaining = 1;
1135
1136 /*
1137 * The number of times that we will try to create the
1138 * directories containing path. We are willing to attempt this
1139 * more than once, because another process could be trying to
1140 * clean up empty directories at the same time as we are
1141 * trying to create them.
1142 */
1143 int create_directories_remaining = 3;
1144
1145 /* A scratch copy of path, filled lazily if we need it: */
1146 struct strbuf path_copy = STRBUF_INIT;
1147
1148 int ret, save_errno;
1149
1150 /* Sanity check: */
1151 assert(*path);
1152
1153 retry_fn:
1154 ret = fn(path, cb);
1155 save_errno = errno;
1156 if (!ret)
1157 goto out;
1158
1159 if (errno == EISDIR && remove_directories_remaining-- > 0) {
1160 /*
1161 * A directory is in the way. Maybe it is empty; try
1162 * to remove it:
1163 */
1164 if (!path_copy.len)
1165 strbuf_addstr(&path_copy, path);
1166
1167 if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
1168 goto retry_fn;
1169 } else if (errno == ENOENT && create_directories_remaining-- > 0) {
1170 /*
1171 * Maybe the containing directory didn't exist, or
1172 * maybe it was just deleted by a process that is
1173 * racing with us to clean up empty directories. Try
1174 * to create it:
1175 */
1176 enum scld_error scld_result;
1177
1178 if (!path_copy.len)
1179 strbuf_addstr(&path_copy, path);
1180
1181 do {
1182 scld_result = safe_create_leading_directories(the_repository, path_copy.buf);
1183 if (scld_result == SCLD_OK)
1184 goto retry_fn;
1185 } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
1186 }
1187
1188 out:
1189 strbuf_release(&path_copy);
1190 errno = save_errno;
1191 return ret;
1192 }
1193
1194 static int remove_empty_directories(struct strbuf *path)
1195 {
1196 /*
1197 * we want to create a file but there is a directory there;
1198 * if that is an empty directory (or a directory that contains
1199 * only empty directories), remove them.
1200 */
1201 return remove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);
1202 }
1203
1204 static int create_reflock(const char *path, void *cb)
1205 {
1206 struct lock_file *lk = cb;
1207
1208 return hold_lock_file_for_update_timeout(
1209 lk, path, LOCK_NO_DEREF,
1210 get_files_ref_lock_timeout_ms()) < 0 ? -1 : 0;
1211 }
1212
1213 /*
1214 * Locks a ref returning the lock on success and NULL on failure.
1215 */
1216 static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
1217 const char *refname,
1218 struct strbuf *err)
1219 {
1220 struct strbuf ref_file = STRBUF_INIT;
1221 struct ref_lock *lock;
1222
1223 files_assert_main_repository(refs, "lock_ref_oid_basic");
1224 assert(err);
1225
1226 CALLOC_ARRAY(lock, 1);
1227
1228 files_ref_path(refs, &ref_file, refname);
1229
1230 /*
1231 * If the ref did not exist and we are creating it, make sure
1232 * there is no existing packed ref whose name begins with our
1233 * refname, nor a packed ref whose name is a proper prefix of
1234 * our refname.
1235 */
1236 if (is_null_oid(&lock->old_oid) &&
1237 refs_verify_refname_available(refs->packed_ref_store, refname,
1238 NULL, NULL, 0, err))
1239 goto error_return;
1240
1241 lock->ref_name = xstrdup(refname);
1242 lock->count = 1;
1243
1244 if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
1245 unable_to_lock_message(ref_file.buf, errno, err);
1246 goto error_return;
1247 }
1248
1249 if (!refs_resolve_ref_unsafe(&refs->base, lock->ref_name, 0,
1250 &lock->old_oid, NULL))
1251 oidclr(&lock->old_oid, refs->base.repo->hash_algo);
1252 goto out;
1253
1254 error_return:
1255 unlock_ref(lock);
1256 lock = NULL;
1257
1258 out:
1259 strbuf_release(&ref_file);
1260 return lock;
1261 }
1262
1263 struct ref_to_prune {
1264 struct ref_to_prune *next;
1265 struct object_id oid;
1266 char name[FLEX_ARRAY];
1267 };
1268
1269 enum {
1270 REMOVE_EMPTY_PARENTS_REF = 0x01,
1271 REMOVE_EMPTY_PARENTS_REFLOG = 0x02
1272 };
1273
1274 /*
1275 * Remove empty parent directories associated with the specified
1276 * reference and/or its reflog, but spare [logs/]refs/ and immediate
1277 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
1278 * REMOVE_EMPTY_PARENTS_REFLOG.
1279 */
1280 static void try_remove_empty_parents(struct files_ref_store *refs,
1281 const char *refname,
1282 unsigned int flags)
1283 {
1284 struct strbuf buf = STRBUF_INIT;
1285 struct strbuf sb = STRBUF_INIT;
1286 char *p, *q;
1287 int i;
1288
1289 strbuf_addstr(&buf, refname);
1290 p = buf.buf;
1291 for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
1292 while (*p && *p != '/')
1293 p++;
1294 /* tolerate duplicate slashes; see check_refname_format() */
1295 while (*p == '/')
1296 p++;
1297 }
1298 q = buf.buf + buf.len;
1299 while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
1300 while (q > p && *q != '/')
1301 q--;
1302 while (q > p && *(q-1) == '/')
1303 q--;
1304 if (q == p)
1305 break;
1306 strbuf_setlen(&buf, q - buf.buf);
1307
1308 strbuf_reset(&sb);
1309 files_ref_path(refs, &sb, buf.buf);
1310 if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
1311 flags &= ~REMOVE_EMPTY_PARENTS_REF;
1312
1313 strbuf_reset(&sb);
1314 files_reflog_path(refs, &sb, buf.buf);
1315 if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
1316 flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
1317 }
1318 strbuf_release(&buf);
1319 strbuf_release(&sb);
1320 }
1321
1322 /* make sure nobody touched the ref, and unlink */
1323 static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
1324 {
1325 struct ref_transaction *transaction;
1326 struct strbuf err = STRBUF_INIT;
1327 int ret = -1;
1328
1329 if (check_refname_format(r->name, 0))
1330 return;
1331
1332 transaction = ref_store_transaction_begin(&refs->base, 0, &err);
1333 if (!transaction)
1334 goto cleanup;
1335 ref_transaction_add_update(
1336 transaction, r->name,
1337 REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
1338 null_oid(the_hash_algo), &r->oid, NULL, NULL, NULL, NULL);
1339 if (ref_transaction_commit(transaction, &err))
1340 goto cleanup;
1341
1342 ret = 0;
1343
1344 cleanup:
1345 if (ret)
1346 error("%s", err.buf);
1347 strbuf_release(&err);
1348 ref_transaction_free(transaction);
1349 return;
1350 }
1351
1352 /*
1353 * Prune the loose versions of the references in the linked list
1354 * `*refs_to_prune`, freeing the entries in the list as we go.
1355 */
1356 static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
1357 {
1358 while (*refs_to_prune) {
1359 struct ref_to_prune *r = *refs_to_prune;
1360 *refs_to_prune = r->next;
1361 prune_ref(refs, r);
1362 free(r);
1363 }
1364 }
1365
1366 /*
1367 * Return true if the specified reference should be packed.
1368 */
1369 static int should_pack_ref(struct files_ref_store *refs,
1370 const char *refname,
1371 const struct object_id *oid, unsigned int ref_flags,
1372 struct pack_refs_opts *opts)
1373 {
1374 struct string_list_item *item;
1375
1376 /* Do not pack per-worktree refs: */
1377 if (parse_worktree_ref(refname, NULL, NULL, NULL) !=
1378 REF_WORKTREE_SHARED)
1379 return 0;
1380
1381 /* Do not pack symbolic refs: */
1382 if (ref_flags & REF_ISSYMREF)
1383 return 0;
1384
1385 /* Do not pack broken refs: */
1386 if (!ref_resolves_to_object(refname, refs->base.repo, oid, ref_flags))
1387 return 0;
1388
1389 if (ref_excluded(opts->exclusions, refname))
1390 return 0;
1391
1392 for_each_string_list_item(item, opts->includes)
1393 if (!wildmatch(item->string, refname, 0))
1394 return 1;
1395
1396 return 0;
1397 }
1398
1399 static int should_pack_refs(struct files_ref_store *refs,
1400 struct pack_refs_opts *opts)
1401 {
1402 struct ref_iterator *iter;
1403 size_t packed_size;
1404 size_t refcount = 0;
1405 size_t limit;
1406 int ret;
1407
1408 if (!(opts->flags & PACK_REFS_AUTO))
1409 return 1;
1410
1411 ret = packed_refs_size(refs->packed_ref_store, &packed_size);
1412 if (ret < 0)
1413 die("cannot determine packed-refs size");
1414
1415 /*
1416 * Packing loose references into the packed-refs file scales with the
1417 * number of references we're about to write. We thus decide whether we
1418 * repack refs by weighing the current size of the packed-refs file
1419 * against the number of loose references. This is done such that we do
1420 * not repack too often on repositories with a huge number of
1421 * references, where we can expect a lot of churn in the number of
1422 * references.
1423 *
1424 * As a heuristic, we repack if the number of loose references in the
1425 * repository exceeds `log2(nr_packed_refs) * 5`, where we estimate
1426  * `nr_packed_refs = packed_size / 100`, which scales as follows:
1427 *
1428 * - 1kB ~ 10 packed refs: 16 refs
1429 * - 10kB ~ 100 packed refs: 33 refs
1430 * - 100kB ~ 1k packed refs: 49 refs
1431 * - 1MB ~ 10k packed refs: 66 refs
1432 * - 10MB ~ 100k packed refs: 82 refs
1433 * - 100MB ~ 1m packed refs: 99 refs
1434 *
1435 * We thus allow roughly 16 additional loose refs per factor of ten of
1436 * packed refs. This heuristic may be tweaked in the future, but should
1437 * serve as a sufficiently good first iteration.
1438 */
1439 limit = log2u(packed_size / 100) * 5;
1440 if (limit < 16)
1441 limit = 16;
1442
1443 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL,
1444 refs->base.repo, 0);
1445 while ((ret = ref_iterator_advance(iter)) == ITER_OK) {
1446 if (should_pack_ref(refs, iter->refname, iter->oid,
1447 iter->flags, opts))
1448 refcount++;
1449 if (refcount >= limit) {
1450 ref_iterator_free(iter);
1451 return 1;
1452 }
1453 }
1454
1455 if (ret != ITER_DONE)
1456 die("error while iterating over references");
1457
1458 ref_iterator_free(iter);
1459 return 0;
1460 }
1461
1462 static int files_pack_refs(struct ref_store *ref_store,
1463 struct pack_refs_opts *opts)
1464 {
1465 struct files_ref_store *refs =
1466 files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
1467 "pack_refs");
1468 struct ref_iterator *iter;
1469 int ok;
1470 struct ref_to_prune *refs_to_prune = NULL;
1471 struct strbuf err = STRBUF_INIT;
1472 struct ref_transaction *transaction;
1473
1474 if (!should_pack_refs(refs, opts))
1475 return 0;
1476
1477 transaction = ref_store_transaction_begin(refs->packed_ref_store,
1478 0, &err);
1479 if (!transaction)
1480 return -1;
1481
1482 packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
1483
1484 iter = cache_ref_iterator_begin(get_loose_ref_cache(refs, 0), NULL,
1485 refs->base.repo, 0);
1486 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
1487 /*
1488 * If the loose reference can be packed, add an entry
1489 * in the packed ref cache. If the reference should be
1490 * pruned, also add it to refs_to_prune.
1491 */
1492 if (!should_pack_ref(refs, iter->refname, iter->oid, iter->flags, opts))
1493 continue;
1494
1495 /*
1496 * Add a reference creation for this reference to the
1497 * packed-refs transaction:
1498 */
1499 if (ref_transaction_update(transaction, iter->refname,
1500 iter->oid, NULL, NULL, NULL,
1501 REF_NO_DEREF, NULL, &err))
1502 die("failure preparing to create packed reference %s: %s",
1503 iter->refname, err.buf);
1504
1505 /* Schedule the loose reference for pruning if requested. */
1506 if ((opts->flags & PACK_REFS_PRUNE)) {
1507 struct ref_to_prune *n;
1508 FLEX_ALLOC_STR(n, name, iter->refname);
1509 oidcpy(&n->oid, iter->oid);
1510 n->next = refs_to_prune;
1511 refs_to_prune = n;
1512 }
1513 }
1514 if (ok != ITER_DONE)
1515 die("error while iterating over references");
1516
1517 if (ref_transaction_commit(transaction, &err))
1518 die("unable to write new packed-refs: %s", err.buf);
1519
1520 ref_transaction_free(transaction);
1521
1522 packed_refs_unlock(refs->packed_ref_store);
1523
1524 prune_refs(refs, &refs_to_prune);
1525 ref_iterator_free(iter);
1526 strbuf_release(&err);
1527 return 0;
1528 }
1529
1530 static int files_optimize(struct ref_store *ref_store, struct pack_refs_opts *opts)
1531 {
1532 /*
1533 * For the "files" backend, "optimizing" is the same as "packing".
1534 * So, we just call the existing worker function for packing.
1535 */
1536 return files_pack_refs(ref_store, opts);
1537 }
1538
1539 /*
1540 * People using contrib's git-new-workdir have .git/logs/refs ->
1541 * /some/other/path/.git/logs/refs, and that may live on another device.
1542 *
1543  * IOW, to avoid cross-device rename errors, the temporary renamed log must
1544  * live under logs/refs.
1545 */
1546 #define TMP_RENAMED_LOG "refs/.tmp-renamed-log"
1547
1548 struct rename_cb {
1549 const char *tmp_renamed_log;
1550 int true_errno;
1551 };
1552
1553 static int rename_tmp_log_callback(const char *path, void *cb_data)
1554 {
1555 struct rename_cb *cb = cb_data;
1556
1557 if (rename(cb->tmp_renamed_log, path)) {
1558 /*
1559 * rename(a, b) when b is an existing directory ought
1560 * to result in ISDIR, but Solaris 5.8 gives ENOTDIR.
1561 * Sheesh. Record the true errno for error reporting,
1562 * but report EISDIR to raceproof_create_file() so
1563 * that it knows to retry.
1564 */
1565 cb->true_errno = errno;
1566 if (errno == ENOTDIR)
1567 errno = EISDIR;
1568 return -1;
1569 } else {
1570 return 0;
1571 }
1572 }
1573
1574 static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
1575 {
1576 struct strbuf path = STRBUF_INIT;
1577 struct strbuf tmp = STRBUF_INIT;
1578 struct rename_cb cb;
1579 int ret;
1580
1581 files_reflog_path(refs, &path, newrefname);
1582 files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
1583 cb.tmp_renamed_log = tmp.buf;
1584 ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
1585 if (ret) {
1586 if (errno == EISDIR)
1587 error("directory not empty: %s", path.buf);
1588 else
1589 error("unable to move logfile %s to %s: %s",
1590 tmp.buf, path.buf,
1591 strerror(cb.true_errno));
1592 }
1593
1594 strbuf_release(&path);
1595 strbuf_release(&tmp);
1596 return ret;
1597 }
1598
1599 static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs,
1600 struct ref_lock *lock,
1601 const struct object_id *oid,
1602 int skip_oid_verification,
1603 struct strbuf *err);
1604 static int commit_ref_update(struct files_ref_store *refs,
1605 struct ref_lock *lock,
1606 const struct object_id *oid, const char *logmsg,
1607 int flags,
1608 struct strbuf *err);
1609
1610 /*
1611 * Emit a better error message than lockfile.c's
1612 * unable_to_lock_message() would in case there is a D/F conflict with
1613 * another existing reference. If there would be a conflict, emit an error
1614 * message and return false; otherwise, return true.
1615 *
1616 * Note that this function is not safe against all races with other
1617  * processes, and that's not its job. We'll emit a more verbose error on D/F
1618 * conflicts if we get past it into lock_ref_oid_basic().
1619 */
1620 static int refs_rename_ref_available(struct ref_store *refs,
1621 const char *old_refname,
1622 const char *new_refname)
1623 {
1624 struct string_list skip = STRING_LIST_INIT_NODUP;
1625 struct strbuf err = STRBUF_INIT;
1626 int ok;
1627
1628 string_list_insert(&skip, old_refname);
1629 ok = !refs_verify_refname_available(refs, new_refname,
1630 NULL, &skip, 0, &err);
1631 if (!ok)
1632 error("%s", err.buf);
1633
1634 string_list_clear(&skip, 0);
1635 strbuf_release(&err);
1636 return ok;
1637 }
1638
1639 static int files_copy_or_rename_ref(struct ref_store *ref_store,
1640 const char *oldrefname, const char *newrefname,
1641 const char *logmsg, int copy)
1642 {
1643 struct files_ref_store *refs =
1644 files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1645 struct object_id orig_oid;
1646 int flag = 0, logmoved = 0;
1647 struct ref_lock *lock;
1648 struct stat loginfo;
1649 struct strbuf sb_oldref = STRBUF_INIT;
1650 struct strbuf sb_newref = STRBUF_INIT;
1651 struct strbuf tmp_renamed_log = STRBUF_INIT;
1652 int log, ret;
1653 struct strbuf err = STRBUF_INIT;
1654
1655 files_reflog_path(refs, &sb_oldref, oldrefname);
1656 files_reflog_path(refs, &sb_newref, newrefname);
1657 files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);
1658
1659 log = !lstat(sb_oldref.buf, &loginfo);
1660 if (log && S_ISLNK(loginfo.st_mode)) {
1661 ret = error("reflog for %s is a symlink", oldrefname);
1662 goto out;
1663 }
1664
1665 if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
1666 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1667 &orig_oid, &flag)) {
1668 ret = error("refname %s not found", oldrefname);
1669 goto out;
1670 }
1671
1672 if (flag & REF_ISSYMREF) {
1673 if (copy)
1674 ret = error("refname %s is a symbolic ref, copying it is not supported",
1675 oldrefname);
1676 else
1677 ret = error("refname %s is a symbolic ref, renaming it is not supported",
1678 oldrefname);
1679 goto out;
1680 }
1681 if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
1682 ret = 1;
1683 goto out;
1684 }
1685
1686 if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
1687 ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1688 oldrefname, strerror(errno));
1689 goto out;
1690 }
1691
1692 if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) {
1693 ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
1694 oldrefname, strerror(errno));
1695 goto out;
1696 }
1697
1698 if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
1699 &orig_oid, REF_NO_DEREF)) {
1700 error("unable to delete old %s", oldrefname);
1701 goto rollback;
1702 }
1703
1704 /*
1705 * Since we are doing a shallow lookup, oid is not the
1706 * correct value to pass to delete_ref as old_oid. But that
1707 * doesn't matter, because an old_oid check wouldn't add to
1708 * the safety anyway; we want to delete the reference whatever
1709 * its current value.
1710 */
1711 if (!copy && refs_resolve_ref_unsafe(&refs->base, newrefname,
1712 RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
1713 NULL, NULL) &&
1714 refs_delete_ref(&refs->base, NULL, newrefname,
1715 NULL, REF_NO_DEREF)) {
1716 if (errno == EISDIR) {
1717 struct strbuf path = STRBUF_INIT;
1718 int result;
1719
1720 files_ref_path(refs, &path, newrefname);
1721 result = remove_empty_directories(&path);
1722 strbuf_release(&path);
1723
1724 if (result) {
1725 error("Directory not empty: %s", newrefname);
1726 goto rollback;
1727 }
1728 } else {
1729 error("unable to delete existing %s", newrefname);
1730 goto rollback;
1731 }
1732 }
1733
1734 if (log && rename_tmp_log(refs, newrefname))
1735 goto rollback;
1736
1737 logmoved = log;
1738
1739 lock = lock_ref_oid_basic(refs, newrefname, &err);
1740 if (!lock) {
1741 if (copy)
1742 error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1743 else
1744 error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
1745 strbuf_release(&err);
1746 goto rollback;
1747 }
1748 oidcpy(&lock->old_oid, &orig_oid);
1749
1750 if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
1751 commit_ref_update(refs, lock, &orig_oid, logmsg, 0, &err)) {
1752 error("unable to write current sha1 into %s: %s", newrefname, err.buf);
1753 strbuf_release(&err);
1754 goto rollback;
1755 }
1756
1757 ret = 0;
1758 goto out;
1759
1760 rollback:
1761 lock = lock_ref_oid_basic(refs, oldrefname, &err);
1762 if (!lock) {
1763 error("unable to lock %s for rollback: %s", oldrefname, err.buf);
1764 strbuf_release(&err);
1765 goto rollbacklog;
1766 }
1767
1768 if (write_ref_to_lockfile(refs, lock, &orig_oid, 0, &err) ||
1769 commit_ref_update(refs, lock, &orig_oid, NULL, REF_SKIP_CREATE_REFLOG, &err)) {
1770 error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
1771 strbuf_release(&err);
1772 }
1773
1774 rollbacklog:
1775 if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
1776 error("unable to restore logfile %s from %s: %s",
1777 oldrefname, newrefname, strerror(errno));
1778 if (!logmoved && log &&
1779 rename(tmp_renamed_log.buf, sb_oldref.buf))
1780 error("unable to restore logfile %s from logs/"TMP_RENAMED_LOG": %s",
1781 oldrefname, strerror(errno));
1782 ret = 1;
1783 out:
1784 strbuf_release(&sb_newref);
1785 strbuf_release(&sb_oldref);
1786 strbuf_release(&tmp_renamed_log);
1787
1788 return ret;
1789 }
1790
1791 static int files_rename_ref(struct ref_store *ref_store,
1792 const char *oldrefname, const char *newrefname,
1793 const char *logmsg)
1794 {
1795 return files_copy_or_rename_ref(ref_store, oldrefname,
1796 newrefname, logmsg, 0);
1797 }
1798
1799 static int files_copy_ref(struct ref_store *ref_store,
1800 const char *oldrefname, const char *newrefname,
1801 const char *logmsg)
1802 {
1803 return files_copy_or_rename_ref(ref_store, oldrefname,
1804 newrefname, logmsg, 1);
1805 }
1806
1807 static int close_ref_gently(struct ref_lock *lock)
1808 {
1809 if (close_lock_file_gently(&lock->lk))
1810 return -1;
1811 return 0;
1812 }
1813
1814 static int commit_ref(struct ref_lock *lock)
1815 {
1816 char *path = get_locked_file_path(&lock->lk);
1817 struct stat st;
1818
1819 if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
1820 /*
1821 * There is a directory at the path we want to rename
1822 * the lockfile to. Hopefully it is empty; try to
1823 * delete it.
1824 */
1825 size_t len = strlen(path);
1826 struct strbuf sb_path = STRBUF_INIT;
1827
1828 strbuf_attach(&sb_path, path, len, len);
1829
1830 /*
1831 * If this fails, commit_lock_file() will also fail
1832 * and will report the problem.
1833 */
1834 remove_empty_directories(&sb_path);
1835 strbuf_release(&sb_path);
1836 } else {
1837 free(path);
1838 }
1839
1840 if (commit_lock_file(&lock->lk))
1841 return -1;
1842 return 0;
1843 }
1844
1845 static int open_or_create_logfile(const char *path, void *cb)
1846 {
1847 int *fd = cb;
1848
1849 *fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
1850 return (*fd < 0) ? -1 : 0;
1851 }
1852
1853 /*
1854 * Create a reflog for a ref. If force_create = 0, only create the
1855 * reflog for certain refs (those for which should_autocreate_reflog
1856 * returns non-zero). Otherwise, create it regardless of the reference
1857 * name. If the logfile already existed or was created, return 0 and
1858 * set *logfd to the file descriptor opened for appending to the file.
1859 * If no logfile exists and we decided not to create one, return 0 and
1860 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
1861 * return -1.
1862 */
1863 static int log_ref_setup(struct files_ref_store *refs,
1864 const char *refname, int force_create,
1865 int *logfd, struct strbuf *err)
1866 {
1867 enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
1868 struct strbuf logfile_sb = STRBUF_INIT;
1869 char *logfile;
1870
1871 if (log_refs_cfg == LOG_REFS_UNSET)
1872 log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
1873
1874 files_reflog_path(refs, &logfile_sb, refname);
1875 logfile = strbuf_detach(&logfile_sb, NULL);
1876
1877 if (force_create || should_autocreate_reflog(log_refs_cfg, refname)) {
1878 if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
1879 if (errno == ENOENT)
1880 strbuf_addf(err, "unable to create directory for '%s': "
1881 "%s", logfile, strerror(errno));
1882 else if (errno == EISDIR)
1883 strbuf_addf(err, "there are still logs under '%s'",
1884 logfile);
1885 else
1886 strbuf_addf(err, "unable to append to '%s': %s",
1887 logfile, strerror(errno));
1888
1889 goto error;
1890 }
1891 } else {
1892 *logfd = open(logfile, O_APPEND | O_WRONLY);
1893 if (*logfd < 0) {
1894 if (errno == ENOENT || errno == EISDIR) {
1895 /*
1896 * The logfile doesn't already exist,
1897 * but that is not an error; it only
1898 * means that we won't write log
1899 * entries to it.
1900 */
1901 ;
1902 } else {
1903 strbuf_addf(err, "unable to append to '%s': %s",
1904 logfile, strerror(errno));
1905 goto error;
1906 }
1907 }
1908 }
1909
1910 if (*logfd >= 0)
1911 adjust_shared_perm(the_repository, logfile);
1912
1913 free(logfile);
1914 return 0;
1915
1916 error:
1917 free(logfile);
1918 return -1;
1919 }
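/*
 * For illustration: with the default LOG_REFS_NORMAL setting,
 * should_autocreate_reflog() is expected to autocreate reflogs only for
 * HEAD and for refs under "refs/heads/", "refs/remotes/" and
 * "refs/notes/"; with "core.logAllRefUpdates=always" (LOG_REFS_ALWAYS)
 * any ref under "refs/" gets one. Everything else only gets a reflog
 * when force_create is set, e.g. via "git update-ref --create-reflog"
 * or files_create_reflog() below.
 */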
1920
1921 static int files_create_reflog(struct ref_store *ref_store, const char *refname,
1922 struct strbuf *err)
1923 {
1924 struct files_ref_store *refs =
1925 files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1926 int fd;
1927
1928 if (log_ref_setup(refs, refname, 1, &fd, err))
1929 return -1;
1930
1931 if (fd >= 0)
1932 close(fd);
1933
1934 return 0;
1935 }
1936
1937 static int log_ref_write_fd(int fd, const struct object_id *old_oid,
1938 const struct object_id *new_oid,
1939 const char *committer, const char *msg)
1940 {
1941 struct strbuf sb = STRBUF_INIT;
1942 int ret = 0;
1943
1944 if (!committer)
1945 committer = git_committer_info(0);
1946
1947 strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer);
1948 if (msg && *msg) {
1949 strbuf_addch(&sb, '\t');
1950 strbuf_addstr(&sb, msg);
1951 }
1952 strbuf_addch(&sb, '\n');
1953 if (write_in_full(fd, sb.buf, sb.len) < 0)
1954 ret = -1;
1955 strbuf_release(&sb);
1956 return ret;
1957 }
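/*
 * For illustration, a single reflog line written by log_ref_write_fd()
 * looks like this (values are made up):
 *
 *	<old-oid> <new-oid> A U Thor <author@example.com> 1712345678 +0000<TAB>commit: message<LF>
 *
 * i.e. the old and new object IDs, the committer ident including
 * timestamp and timezone, then an optional TAB-separated message,
 * terminated by a newline. show_one_reflog_ent() below parses exactly
 * this layout.
 */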
1958
1959 static int files_log_ref_write(struct files_ref_store *refs,
1960 const char *refname,
1961 const struct object_id *old_oid,
1962 const struct object_id *new_oid,
1963 const char *committer_info, const char *msg,
1964 int flags, struct strbuf *err)
1965 {
1966 int logfd, result;
1967
1968 if (flags & REF_SKIP_CREATE_REFLOG)
1969 return 0;
1970
1971 result = log_ref_setup(refs, refname,
1972 flags & REF_FORCE_CREATE_REFLOG,
1973 &logfd, err);
1974
1975 if (result)
1976 return result;
1977
1978 if (logfd < 0)
1979 return 0;
1980 result = log_ref_write_fd(logfd, old_oid, new_oid, committer_info, msg);
1981 if (result) {
1982 struct strbuf sb = STRBUF_INIT;
1983 int save_errno = errno;
1984
1985 files_reflog_path(refs, &sb, refname);
1986 strbuf_addf(err, "unable to append to '%s': %s",
1987 sb.buf, strerror(save_errno));
1988 strbuf_release(&sb);
1989 close(logfd);
1990 return -1;
1991 }
1992 if (close(logfd)) {
1993 struct strbuf sb = STRBUF_INIT;
1994 int save_errno = errno;
1995
1996 files_reflog_path(refs, &sb, refname);
1997 strbuf_addf(err, "unable to append to '%s': %s",
1998 sb.buf, strerror(save_errno));
1999 strbuf_release(&sb);
2000 return -1;
2001 }
2002 return 0;
2003 }
2004
2005 /*
2006 * Write oid into the open lockfile, then close the lockfile. On
2007 * errors, roll back the lockfile, fill in *err and return non-zero.
2008 */
2009 static enum ref_transaction_error write_ref_to_lockfile(struct files_ref_store *refs,
2010 struct ref_lock *lock,
2011 const struct object_id *oid,
2012 int skip_oid_verification,
2013 struct strbuf *err)
2014 {
2015 static char term = '\n';
2016 struct object *o;
2017 int fd;
2018
2019 if (!skip_oid_verification) {
2020 o = parse_object(refs->base.repo, oid);
2021 if (!o) {
2022 strbuf_addf(
2023 err,
2024 "trying to write ref '%s' with nonexistent object %s",
2025 lock->ref_name, oid_to_hex(oid));
2026 unlock_ref(lock);
2027 return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
2028 }
2029 if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
2030 strbuf_addf(
2031 err,
2032 "trying to write non-commit object %s to branch '%s'",
2033 oid_to_hex(oid), lock->ref_name);
2034 unlock_ref(lock);
2035 return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
2036 }
2037 }
2038 fd = get_lock_file_fd(&lock->lk);
2039 if (write_in_full(fd, oid_to_hex(oid), refs->base.repo->hash_algo->hexsz) < 0 ||
2040 write_in_full(fd, &term, 1) < 0 ||
2041 fsync_component(FSYNC_COMPONENT_REFERENCE, get_lock_file_fd(&lock->lk)) < 0 ||
2042 close_ref_gently(lock) < 0) {
2043 strbuf_addf(err,
2044 "couldn't write '%s'", get_lock_file_path(&lock->lk));
2045 unlock_ref(lock);
2046 return REF_TRANSACTION_ERROR_GENERIC;
2047 }
2048 return 0;
2049 }
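/*
 * The payload written above is the plain loose-ref format: the
 * hexadecimal object ID followed by a single newline, e.g. (value is
 * hypothetical):
 *
 *	3f786850e387550fdab836ed7e6dc881de23001b
 *
 * with refs->base.repo->hash_algo->hexsz determining whether that is a
 * 40-character SHA-1 or a 64-character SHA-256 ID.
 */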
2050
2051 /*
2052 * Commit a change to a loose reference that has already been written
2053 * to the loose reference lockfile. Also update the reflogs if
2054 * necessary, using the specified logmsg (which can be NULL).
2055 */
2056 static int commit_ref_update(struct files_ref_store *refs,
2057 struct ref_lock *lock,
2058 const struct object_id *oid, const char *logmsg,
2059 int flags,
2060 struct strbuf *err)
2061 {
2062 files_assert_main_repository(refs, "commit_ref_update");
2063
2064 clear_loose_ref_cache(refs);
2065 if (files_log_ref_write(refs, lock->ref_name, &lock->old_oid, oid, NULL,
2066 logmsg, flags, err)) {
2067 char *old_msg = strbuf_detach(err, NULL);
2068 strbuf_addf(err, "cannot update the ref '%s': %s",
2069 lock->ref_name, old_msg);
2070 free(old_msg);
2071 unlock_ref(lock);
2072 return -1;
2073 }
2074
2075 if (strcmp(lock->ref_name, "HEAD") != 0) {
2076 /*
2077 * Special hack: If a branch is updated directly and HEAD
2078 * points to it (may happen on the remote side of a push
2079 * for example) then logically the HEAD reflog should be
2080 * updated too.
2081 * A generic solution would require reverse symref lookups,
2082 * but finding all symrefs pointing to the given branch
2083 * would be too costly for this rare event (the direct
2084 * update of a branch) to be worth it. So let's cheat and
2085 * check with HEAD only, which should cover 99% of all usage
2086 * scenarios (even 100% of the default ones).
2087 */
2088 int head_flag;
2089 const char *head_ref;
2090
2091 head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
2092 RESOLVE_REF_READING,
2093 NULL, &head_flag);
2094 if (head_ref && (head_flag & REF_ISSYMREF) &&
2095 !strcmp(head_ref, lock->ref_name)) {
2096 struct strbuf log_err = STRBUF_INIT;
2097 if (files_log_ref_write(refs, "HEAD", &lock->old_oid,
2098 oid, NULL, logmsg, flags,
2099 &log_err)) {
2100 error("%s", log_err.buf);
2101 strbuf_release(&log_err);
2102 }
2103 }
2104 }
2105
2106 if (commit_ref(lock)) {
2107 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
2108 unlock_ref(lock);
2109 return -1;
2110 }
2111
2112 unlock_ref(lock);
2113 return 0;
2114 }
2115
2116 #if defined(NO_SYMLINK_HEAD) || defined(WITH_BREAKING_CHANGES)
2117 #define create_ref_symlink(a, b) (-1)
2118 #else
2119 static int create_ref_symlink(struct ref_lock *lock, const char *target)
2120 {
2121 static int warn_once = 1;
2122 char *ref_path;
2123 int ret = -1;
2124
2125 ref_path = get_locked_file_path(&lock->lk);
2126 unlink(ref_path);
2127 ret = symlink(target, ref_path);
2128 free(ref_path);
2129
2130 if (ret)
2131 fprintf(stderr, "no symlink - falling back to symbolic ref\n");
2132
2133 if (warn_once)
2134 warning(_("'core.preferSymlinkRefs=true' is nominated for removal.\n"
2135 "hint: The use of symbolic links for symbolic refs is deprecated\n"
2136 "hint: and will be removed in Git 3.0. The configuration that\n"
2137 "hint: tells Git to use them is thus going away. You can unset\n"
2138 "hint: it with:\n"
2139 "hint:\n"
2140 "hint:\tgit config unset core.preferSymlinkRefs\n"
2141 "hint:\n"
2142 "hint: Git will then use the textual symref format instead."));
2143 warn_once = 0;
2144
2145 return ret;
2146 }
2147 #endif
2148
2149 static int create_symref_lock(struct ref_lock *lock, const char *target,
2150 struct strbuf *err)
2151 {
2152 if (!fdopen_lock_file(&lock->lk, "w")) {
2153 strbuf_addf(err, "unable to fdopen %s: %s",
2154 get_lock_file_path(&lock->lk), strerror(errno));
2155 return -1;
2156 }
2157
2158 if (fprintf(get_lock_file_fp(&lock->lk), "ref: %s\n", target) < 0) {
2159 strbuf_addf(err, "unable to write to %s: %s",
2160 get_lock_file_path(&lock->lk), strerror(errno));
2161 return -1;
2162 }
2163
2164 return 0;
2165 }
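/*
 * The lockfile content produced here is the textual symref format, e.g.
 * for a hypothetical target:
 *
 *	ref: refs/heads/main
 *
 * (plus the trailing newline). Committing the lock later, via
 * commit_ref(), renames it into place as the symref file itself.
 */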
2166
2167 static int files_reflog_exists(struct ref_store *ref_store,
2168 const char *refname)
2169 {
2170 struct files_ref_store *refs =
2171 files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
2172 struct strbuf sb = STRBUF_INIT;
2173 struct stat st;
2174 int ret;
2175
2176 files_reflog_path(refs, &sb, refname);
2177 ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
2178 strbuf_release(&sb);
2179 return ret;
2180 }
2181
2182 static int files_delete_reflog(struct ref_store *ref_store,
2183 const char *refname)
2184 {
2185 struct files_ref_store *refs =
2186 files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
2187 struct strbuf sb = STRBUF_INIT;
2188 int ret;
2189
2190 files_reflog_path(refs, &sb, refname);
2191 ret = remove_path(sb.buf);
2192 strbuf_release(&sb);
2193 return ret;
2194 }
2195
2196 static int show_one_reflog_ent(struct files_ref_store *refs,
2197 const char *refname,
2198 struct strbuf *sb,
2199 each_reflog_ent_fn fn, void *cb_data)
2200 {
2201 struct object_id ooid, noid;
2202 char *email_end, *message;
2203 timestamp_t timestamp;
2204 int tz;
2205 const char *p = sb->buf;
2206
2207 /* old SP new SP name <email> SP time TAB msg LF */
2208 if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
2209 parse_oid_hex_algop(p, &ooid, &p, refs->base.repo->hash_algo) || *p++ != ' ' ||
2210 parse_oid_hex_algop(p, &noid, &p, refs->base.repo->hash_algo) || *p++ != ' ' ||
2211 !(email_end = strchr(p, '>')) ||
2212 email_end[1] != ' ' ||
2213 !(timestamp = parse_timestamp(email_end + 2, &message, 10)) ||
2214 !message || message[0] != ' ' ||
2215 (message[1] != '+' && message[1] != '-') ||
2216 !isdigit(message[2]) || !isdigit(message[3]) ||
2217 !isdigit(message[4]) || !isdigit(message[5]))
2218 return 0; /* corrupt? */
2219 email_end[1] = '\0';
2220 tz = strtol(message + 1, NULL, 10);
2221 if (message[6] != '\t')
2222 message += 6;
2223 else
2224 message += 7;
2225 return fn(refname, &ooid, &noid, p, timestamp, tz, message, cb_data);
2226 }
2227
2228 static char *find_beginning_of_line(char *bob, char *scan)
2229 {
2230 while (bob < scan && *(--scan) != '\n')
2231 ; /* keep scanning backwards */
2232 /*
2233 * Return either beginning of the buffer, or LF at the end of
2234 * the previous line.
2235 */
2236 return scan;
2237 }
2238
2239 static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
2240 const char *refname,
2241 each_reflog_ent_fn fn,
2242 void *cb_data)
2243 {
2244 struct files_ref_store *refs =
2245 files_downcast(ref_store, REF_STORE_READ,
2246 "for_each_reflog_ent_reverse");
2247 struct strbuf sb = STRBUF_INIT;
2248 FILE *logfp;
2249 long pos;
2250 int ret = 0, at_tail = 1;
2251
2252 files_reflog_path(refs, &sb, refname);
2253 logfp = fopen(sb.buf, "r");
2254 strbuf_release(&sb);
2255 if (!logfp)
2256 return -1;
2257
2258 /* Jump to the end */
2259 if (fseek(logfp, 0, SEEK_END) < 0)
2260 ret = error("cannot seek back reflog for %s: %s",
2261 refname, strerror(errno));
2262 pos = ftell(logfp);
2263 while (!ret && 0 < pos) {
2264 int cnt;
2265 size_t nread;
2266 char buf[BUFSIZ];
2267 char *endp, *scanp;
2268
2269 /* Fill next block from the end */
2270 cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
2271 if (fseek(logfp, pos - cnt, SEEK_SET)) {
2272 ret = error("cannot seek back reflog for %s: %s",
2273 refname, strerror(errno));
2274 break;
2275 }
2276 nread = fread(buf, cnt, 1, logfp);
2277 if (nread != 1) {
2278 ret = error("cannot read %d bytes from reflog for %s: %s",
2279 cnt, refname, strerror(errno));
2280 break;
2281 }
2282 pos -= cnt;
2283
2284 scanp = endp = buf + cnt;
2285 if (at_tail && scanp[-1] == '\n')
2286 /* Looking at the final LF at the end of the file */
2287 scanp--;
2288 at_tail = 0;
2289
2290 while (buf < scanp) {
2291 /*
2292 * terminating LF of the previous line, or the beginning
2293 * of the buffer.
2294 */
2295 char *bp;
2296
2297 bp = find_beginning_of_line(buf, scanp);
2298
2299 if (*bp == '\n') {
2300 /*
2301 * The newline is the end of the previous line,
2302 * so we know we have a complete line starting
2303 * at (bp + 1). Prefix it onto any prior data
2304 * we collected for the line and process it.
2305 */
2306 strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
2307 scanp = bp;
2308 endp = bp + 1;
2309 ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data);
2310 strbuf_reset(&sb);
2311 if (ret)
2312 break;
2313 } else if (!pos) {
2314 /*
2315 * We are at the start of the buffer, and the
2316 * start of the file; there is no previous
2317 * line, and we have everything for this one.
2318 * Process it, and we can end the loop.
2319 */
2320 strbuf_splice(&sb, 0, 0, buf, endp - buf);
2321 ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data);
2322 strbuf_reset(&sb);
2323 break;
2324 }
2325
2326 if (bp == buf) {
2327 /*
2328 * We are at the start of the buffer, and there
2329 * is more file to read backwards. Which means
2330 * we are in the middle of a line. Note that we
2331 * may get here even if *bp was a newline; that
2332 * just means we are at the exact end of the
2333 * previous line, rather than some spot in the
2334 * middle.
2335 *
2336 * Save away what we have to be combined with
2337 * the data from the next read.
2338 */
2339 strbuf_splice(&sb, 0, 0, buf, endp - buf);
2340 break;
2341 }
2342 }
2343
2344 }
2345 if (!ret && sb.len)
2346 BUG("reverse reflog parser had leftover data");
2347
2348 fclose(logfp);
2349 strbuf_release(&sb);
2350 return ret;
2351 }
2352
2353 static int files_for_each_reflog_ent(struct ref_store *ref_store,
2354 const char *refname,
2355 each_reflog_ent_fn fn, void *cb_data)
2356 {
2357 struct files_ref_store *refs =
2358 files_downcast(ref_store, REF_STORE_READ,
2359 "for_each_reflog_ent");
2360 FILE *logfp;
2361 struct strbuf sb = STRBUF_INIT;
2362 int ret = 0;
2363
2364 files_reflog_path(refs, &sb, refname);
2365 logfp = fopen(sb.buf, "r");
2366 strbuf_release(&sb);
2367 if (!logfp)
2368 return -1;
2369
2370 while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
2371 ret = show_one_reflog_ent(refs, refname, &sb, fn, cb_data);
2372 fclose(logfp);
2373 strbuf_release(&sb);
2374 return ret;
2375 }
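/*
 * A minimal sketch of an each_reflog_ent_fn consumer, matching the way
 * show_one_reflog_ent() invokes the callback above; the callback name
 * and the counter are hypothetical:
 *
 *	static int count_ent(const char *refname UNUSED,
 *			     struct object_id *ooid UNUSED,
 *			     struct object_id *noid UNUSED,
 *			     const char *email UNUSED,
 *			     timestamp_t timestamp UNUSED, int tz UNUSED,
 *			     const char *msg UNUSED, void *cb_data)
 *	{
 *		int *nr = cb_data;
 *		(*nr)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	refs_for_each_reflog_ent(ref_store, "HEAD", count_ent, &nr);
 *
 * Returning non-zero from the callback stops the iteration early.
 */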
2376
2377 struct files_reflog_iterator {
2378 struct ref_iterator base;
2379 struct ref_store *ref_store;
2380 struct dir_iterator *dir_iterator;
2381 };
2382
2383 static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
2384 {
2385 struct files_reflog_iterator *iter =
2386 (struct files_reflog_iterator *)ref_iterator;
2387 struct dir_iterator *diter = iter->dir_iterator;
2388 int ok;
2389
2390 while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
2391 if (!S_ISREG(diter->st.st_mode))
2392 continue;
2393 if (check_refname_format(diter->basename,
2394 REFNAME_ALLOW_ONELEVEL))
2395 continue;
2396
2397 iter->base.refname = diter->relative_path;
2398 return ITER_OK;
2399 }
2400
2401 return ok;
2402 }
2403
2404 static int files_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
2405 const char *refname UNUSED,
2406 unsigned int flags UNUSED)
2407 {
2408 BUG("ref_iterator_seek() called for reflog_iterator");
2409 }
2410
2411 static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
2412 struct object_id *peeled UNUSED)
2413 {
2414 BUG("ref_iterator_peel() called for reflog_iterator");
2415 }
2416
2417 static void files_reflog_iterator_release(struct ref_iterator *ref_iterator)
2418 {
2419 struct files_reflog_iterator *iter =
2420 (struct files_reflog_iterator *)ref_iterator;
2421 dir_iterator_free(iter->dir_iterator);
2422 }
2423
2424 static struct ref_iterator_vtable files_reflog_iterator_vtable = {
2425 .advance = files_reflog_iterator_advance,
2426 .seek = files_reflog_iterator_seek,
2427 .peel = files_reflog_iterator_peel,
2428 .release = files_reflog_iterator_release,
2429 };
2430
2431 static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
2432 const char *gitdir)
2433 {
2434 struct dir_iterator *diter;
2435 struct files_reflog_iterator *iter;
2436 struct ref_iterator *ref_iterator;
2437 struct strbuf sb = STRBUF_INIT;
2438
2439 strbuf_addf(&sb, "%s/logs", gitdir);
2440
2441 diter = dir_iterator_begin(sb.buf, DIR_ITERATOR_SORTED);
2442 if (!diter) {
2443 strbuf_release(&sb);
2444 return empty_ref_iterator_begin();
2445 }
2446
2447 CALLOC_ARRAY(iter, 1);
2448 ref_iterator = &iter->base;
2449
2450 base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
2451 iter->dir_iterator = diter;
2452 iter->ref_store = ref_store;
2453 strbuf_release(&sb);
2454
2455 return ref_iterator;
2456 }
2457
2458 static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
2459 {
2460 struct files_ref_store *refs =
2461 files_downcast(ref_store, REF_STORE_READ,
2462 "reflog_iterator_begin");
2463
2464 if (!strcmp(refs->base.gitdir, refs->gitcommondir)) {
2465 return reflog_iterator_begin(ref_store, refs->gitcommondir);
2466 } else {
2467 return merge_ref_iterator_begin(
2468 reflog_iterator_begin(ref_store, refs->base.gitdir),
2469 reflog_iterator_begin(ref_store, refs->gitcommondir),
2470 ref_iterator_select, refs);
2471 }
2472 }
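/*
 * In a linked worktree the two iterators above cover both reflog
 * locations, roughly (paths for illustration only):
 *
 *	$GIT_COMMON_DIR/logs/refs/heads/...                shared refs
 *	$GIT_DIR/logs/HEAD, $GIT_DIR/logs/refs/bisect/...  per-worktree refs
 *
 * and ref_iterator_select() decides which of the two sources wins
 * whenever both yield an entry for the same name.
 */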
2473
2474 /*
2475 * If update is a direct update of head_ref (the reference pointed to
2476 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
2477 */
2478 static enum ref_transaction_error split_head_update(struct ref_update *update,
2479 struct ref_transaction *transaction,
2480 const char *head_ref,
2481 struct strbuf *err)
2482 {
2483 struct ref_update *new_update;
2484
2485 if ((update->flags & REF_LOG_ONLY) ||
2486 (update->flags & REF_SKIP_CREATE_REFLOG) ||
2487 (update->flags & REF_IS_PRUNING) ||
2488 (update->flags & REF_UPDATE_VIA_HEAD))
2489 return 0;
2490
2491 if (strcmp(update->refname, head_ref))
2492 return 0;
2493
2494 /*
2495 * First make sure that HEAD is not already in the
2496 * transaction. This check is O(lg N) in the transaction
2497 * size, but it happens at most once per transaction.
2498 */
2499 if (string_list_has_string(&transaction->refnames, "HEAD")) {
2500 /* An entry already existed */
2501 strbuf_addf(err,
2502 "multiple updates for 'HEAD' (including one "
2503 "via its referent '%s') are not allowed",
2504 update->refname);
2505 return REF_TRANSACTION_ERROR_NAME_CONFLICT;
2506 }
2507
2508 new_update = ref_transaction_add_update(
2509 transaction, "HEAD",
2510 update->flags | REF_LOG_ONLY | REF_NO_DEREF | REF_LOG_VIA_SPLIT,
2511 &update->new_oid, &update->old_oid,
2512 NULL, NULL, update->committer_info, update->msg);
2513 new_update->parent_update = update;
2514
2515 /*
2516 * Add "HEAD". This insertion is O(N) in the transaction
2517 * size, but it happens at most once per transaction.
2518 * Add new_update->refname instead of a literal "HEAD".
2519 */
2520 if (strcmp(new_update->refname, "HEAD"))
2521 BUG("%s unexpectedly not 'HEAD'", new_update->refname);
2522
2523 return 0;
2524 }
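/*
 * Example: a transaction that updates only "refs/heads/main" while HEAD
 * is a symref pointing at "refs/heads/main" ends up with two queued
 * updates: the original one, plus a REF_LOG_ONLY | REF_NO_DEREF |
 * REF_LOG_VIA_SPLIT entry for "HEAD", so that the HEAD reflog records
 * the branch update as well. The branch name is only an illustration.
 */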
2525
2526 /*
2527 * update is for a symref that points at referent and doesn't have
2528 * REF_NO_DEREF set. Split it into two updates:
2529 * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set
2530 * - A new, separate update for the referent reference
2531 * Note that the new update will itself be subject to splitting when
2532 * the iteration gets to it.
2533 */
2534 static enum ref_transaction_error split_symref_update(struct ref_update *update,
2535 const char *referent,
2536 struct ref_transaction *transaction,
2537 struct strbuf *err)
2538 {
2539 struct ref_update *new_update;
2540 unsigned int new_flags;
2541
2542 /*
2543 * First make sure that referent is not already in the
2544 * transaction. This check is O(lg N) in the transaction
2545 * size, but it happens at most once per symref in a
2546 * transaction.
2547 */
2548 if (string_list_has_string(&transaction->refnames, referent)) {
2549 /* An entry already exists */
2550 strbuf_addf(err,
2551 "multiple updates for '%s' (including one "
2552 "via symref '%s') are not allowed",
2553 referent, update->refname);
2554 return REF_TRANSACTION_ERROR_NAME_CONFLICT;
2555 }
2556
2557 new_flags = update->flags;
2558 if (!strcmp(update->refname, "HEAD")) {
2559 /*
2560 * Record that the new update came via HEAD, so that
2561 * when we process it, split_head_update() doesn't try
2562 * to add another reflog update for HEAD. Note that
2563 * this bit will be propagated if the new_update
2564 * itself needs to be split.
2565 */
2566 new_flags |= REF_UPDATE_VIA_HEAD;
2567 }
2568
2569 new_update = ref_transaction_add_update(
2570 transaction, referent, new_flags,
2571 update->new_target ? NULL : &update->new_oid,
2572 update->old_target ? NULL : &update->old_oid,
2573 update->new_target, update->old_target, NULL,
2574 update->msg);
2575
2576 new_update->parent_update = update;
2577
2578 /*
2579 * Change the symbolic ref update to log only. Also, it
2580 * doesn't need to check its old OID value, as that will be
2581 * done when new_update is processed.
2582 */
2583 update->flags |= REF_LOG_ONLY | REF_NO_DEREF;
2584
2585 return 0;
2586 }
2587
2588 /*
2589 * Check whether the REF_HAVE_OLD and old_oid values stored in update
2590 * are consistent with oid, which is the reference's current value. If
2591 * everything is OK, return 0; otherwise, write an error message to
2592 * err and return the appropriate non-zero error code.
2593 */
2594 static enum ref_transaction_error check_old_oid(struct ref_update *update,
2595 struct object_id *oid,
2596 struct strbuf *referent,
2597 struct strbuf *err)
2598 {
2599 if (update->flags & REF_LOG_ONLY ||
2600 !(update->flags & REF_HAVE_OLD))
2601 return 0;
2602
2603 if (oideq(oid, &update->old_oid)) {
2604 /*
2605 * Normally matching the expected old oid is enough. Either we
2606 * found the ref at the expected state, or we are creating and
2607 * expect the null oid (and likewise found nothing).
2608 *
2609 * But there is one exception for the null oid: if we found a
2610 * symref pointing to nothing we'll also get the null oid. In
2611 * regular recursive mode, that's good (we'll write to what the
2612 * symref points to, which doesn't exist). But in no-deref
2613 * mode, it means we'll clobber the symref, even though the
2614 * caller asked for this to be a creation event. So flag
2615 * that case to preserve the dangling symref.
2616 */
2617 if ((update->flags & REF_NO_DEREF) && referent->len &&
2618 is_null_oid(oid)) {
2619 strbuf_addf(err, "cannot lock ref '%s': "
2620 "dangling symref already exists",
2621 ref_update_original_update_refname(update));
2622 return REF_TRANSACTION_ERROR_CREATE_EXISTS;
2623 }
2624 return 0;
2625 }
2626
2627 if (is_null_oid(&update->old_oid)) {
2628 strbuf_addf(err, "cannot lock ref '%s': "
2629 "reference already exists",
2630 ref_update_original_update_refname(update));
2631 return REF_TRANSACTION_ERROR_CREATE_EXISTS;
2632 } else if (is_null_oid(oid)) {
2633 strbuf_addf(err, "cannot lock ref '%s': "
2634 "reference is missing but expected %s",
2635 ref_update_original_update_refname(update),
2636 oid_to_hex(&update->old_oid));
2637 return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
2638 }
2639
2640 strbuf_addf(err, "cannot lock ref '%s': is at %s but expected %s",
2641 ref_update_original_update_refname(update), oid_to_hex(oid),
2642 oid_to_hex(&update->old_oid));
2643
2644 return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE;
2645 }
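/*
 * To illustrate the possible outcomes (ref name and values are
 * hypothetical): expecting the null OID while "refs/heads/topic"
 * already exists yields REF_TRANSACTION_ERROR_CREATE_EXISTS; expecting
 * a concrete OID while the ref is missing yields
 * REF_TRANSACTION_ERROR_NONEXISTENT_REF; and an existing ref whose
 * value differs from the expectation yields
 * REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE.
 */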
2646
2647 struct files_transaction_backend_data {
2648 struct ref_transaction *packed_transaction;
2649 int packed_refs_locked;
2650 struct strmap ref_locks;
2651 };
2652
2653 /*
2654 * Prepare for carrying out update:
2655 * - Lock the reference referred to by update.
2656 * - Read the reference under lock.
2657 * - Check that its old OID value (if specified) is correct, and in
2658 * any case record it in update->lock->old_oid for later use when
2659 * writing the reflog.
2660 * - If it is a symref update without REF_NO_DEREF, split it up into a
2661 * REF_LOG_ONLY update of the symref and add a separate update for
2662 * the referent to transaction.
2663 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
2664 * update of HEAD.
2665 */
2666 static enum ref_transaction_error lock_ref_for_update(struct files_ref_store *refs,
2667 struct ref_update *update,
2668 size_t update_idx,
2669 struct ref_transaction *transaction,
2670 const char *head_ref,
2671 struct string_list *refnames_to_check,
2672 struct strbuf *err)
2673 {
2674 struct strbuf referent = STRBUF_INIT;
2675 int mustexist = ref_update_expects_existing_old_ref(update);
2676 struct files_transaction_backend_data *backend_data;
2677 enum ref_transaction_error ret = 0;
2678 struct ref_lock *lock;
2679
2680 files_assert_main_repository(refs, "lock_ref_for_update");
2681
2682 backend_data = transaction->backend_data;
2683
2684 if ((update->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(update))
2685 update->flags |= REF_DELETING;
2686
2687 if (head_ref) {
2688 ret = split_head_update(update, transaction, head_ref, err);
2689 if (ret)
2690 goto out;
2691 }
2692
2693 lock = strmap_get(&backend_data->ref_locks, update->refname);
2694 if (lock) {
2695 lock->count++;
2696 } else {
2697 ret = lock_raw_ref(refs, transaction, update_idx, mustexist,
2698 refnames_to_check, &lock, &referent, err);
2699 if (ret) {
2700 char *reason;
2701
2702 reason = strbuf_detach(err, NULL);
2703 strbuf_addf(err, "cannot lock ref '%s': %s",
2704 ref_update_original_update_refname(update), reason);
2705 free(reason);
2706 goto out;
2707 }
2708
2709 strmap_put(&backend_data->ref_locks, update->refname, lock);
2710 }
2711
2712 update->backend_data = lock;
2713
2714 if (update->flags & REF_LOG_VIA_SPLIT) {
2715 struct ref_lock *parent_lock;
2716
2717 if (!update->parent_update)
2718 BUG("split update without a parent");
2719
2720 parent_lock = update->parent_update->backend_data;
2721
2722 /*
2723 * Check that "HEAD" didn't racily change since we have looked
2724 * it up. If it did we must refuse to write the reflog entry.
2725 *
2726 * Note that this does not catch all races: if "HEAD" was
2727 * racily changed to point to one of the refs that are part of the
2728 * transaction then we would miss writing the split reflog
2729 * entry for "HEAD".
2730 */
2731 if (!(update->type & REF_ISSYMREF) ||
2732 strcmp(update->parent_update->refname, referent.buf)) {
2733 strbuf_addstr(err, "HEAD has been racily updated");
2734 ret = REF_TRANSACTION_ERROR_GENERIC;
2735 goto out;
2736 }
2737
2738 if (update->flags & REF_HAVE_OLD) {
2739 oidcpy(&lock->old_oid, &update->old_oid);
2740 } else {
2741 oidcpy(&lock->old_oid, &parent_lock->old_oid);
2742 }
2743 } else if (update->type & REF_ISSYMREF) {
2744 if (update->flags & REF_NO_DEREF) {
2745 /*
2746 * We won't be reading the referent as part of
2747 * the transaction, so we have to read it here
2748 * to record and possibly check old_oid:
2749 */
2750 if (!refs_resolve_ref_unsafe(&refs->base,
2751 referent.buf, 0,
2752 &lock->old_oid, NULL)) {
2753 if (update->flags & REF_HAVE_OLD) {
2754 strbuf_addf(err, "cannot lock ref '%s': "
2755 "error reading reference",
2756 ref_update_original_update_refname(update));
2757 ret = REF_TRANSACTION_ERROR_GENERIC;
2758 goto out;
2759 }
2760 }
2761
2762 if (update->old_target)
2763 ret = ref_update_check_old_target(referent.buf, update, err);
2764 else
2765 ret = check_old_oid(update, &lock->old_oid,
2766 &referent, err);
2767 if (ret)
2768 goto out;
2769 } else {
2770 /*
2771 * Create a new update for the reference this
2772 * symref is pointing at. Also, we will record
2773 * and verify old_oid for this update as part
2774 * of processing the split-off update, so we
2775 * don't have to do it here.
2776 */
2777 ret = split_symref_update(update, referent.buf,
2778 transaction, err);
2779 if (ret)
2780 goto out;
2781 }
2782 } else {
2783 struct ref_update *parent_update;
2784
2785 /*
2786 * Even if the ref is a regular ref, if `old_target` is set, we
2787 * fail with an error.
2788 */
2789 if (update->old_target) {
2790 strbuf_addf(err, _("cannot lock ref '%s': "
2791 "expected symref with target '%s': "
2792 "but is a regular ref"),
2793 ref_update_original_update_refname(update),
2794 update->old_target);
2795 ret = REF_TRANSACTION_ERROR_EXPECTED_SYMREF;
2796 goto out;
2797 } else {
2798 ret = check_old_oid(update, &lock->old_oid,
2799 &referent, err);
2800 if (ret) {
2801 goto out;
2802 }
2803 }
2804
2805 /*
2806 * If this update is happening indirectly because of a
2807 * symref update, record the old OID in the parent
2808 * update:
2809 */
2810 for (parent_update = update->parent_update;
2811 parent_update;
2812 parent_update = parent_update->parent_update) {
2813 struct ref_lock *parent_lock = parent_update->backend_data;
2814 oidcpy(&parent_lock->old_oid, &lock->old_oid);
2815 }
2816 }
2817
2818 if (update->new_target && !(update->flags & REF_LOG_ONLY)) {
2819 if (create_symref_lock(lock, update->new_target, err)) {
2820 ret = REF_TRANSACTION_ERROR_GENERIC;
2821 goto out;
2822 }
2823
2824 if (close_ref_gently(lock)) {
2825 strbuf_addf(err, "couldn't close '%s.lock'",
2826 update->refname);
2827 ret = REF_TRANSACTION_ERROR_GENERIC;
2828 goto out;
2829 }
2830
2831 /*
2832 * Once we have created the symref lock, the commit
2833 * phase of the transaction only needs to commit the lock.
2834 */
2835 update->flags |= REF_NEEDS_COMMIT;
2836 } else if ((update->flags & REF_HAVE_NEW) &&
2837 !(update->flags & REF_DELETING) &&
2838 !(update->flags & REF_LOG_ONLY)) {
2839 if (!(update->type & REF_ISSYMREF) &&
2840 oideq(&lock->old_oid, &update->new_oid)) {
2841 /*
2842 * The reference already has the desired
2843 * value, so we don't need to write it.
2844 */
2845 } else {
2846 ret = write_ref_to_lockfile(
2847 refs, lock, &update->new_oid,
2848 update->flags & REF_SKIP_OID_VERIFICATION,
2849 err);
2850 if (ret) {
2851 char *write_err = strbuf_detach(err, NULL);
2852
2853 /*
2854 * The lock was freed upon failure of
2855 * write_ref_to_lockfile():
2856 */
2857 update->backend_data = NULL;
2858 strbuf_addf(err,
2859 "cannot update ref '%s': %s",
2860 update->refname, write_err);
2861 free(write_err);
2862 goto out;
2863 } else {
2864 update->flags |= REF_NEEDS_COMMIT;
2865 }
2866 }
2867 }
2868 if (!(update->flags & REF_NEEDS_COMMIT)) {
2869 /*
2870 * We didn't call write_ref_to_lockfile(), so
2871 * the lockfile is still open. Close it to
2872 * free up the file descriptor:
2873 */
2874 if (close_ref_gently(lock)) {
2875 strbuf_addf(err, "couldn't close '%s.lock'",
2876 update->refname);
2877 ret = REF_TRANSACTION_ERROR_GENERIC;
2878 goto out;
2879 }
2880 }
2881
2882 out:
2883 strbuf_release(&referent);
2884 return ret;
2885 }
2886
2887 /*
2888 * Unlock any references in `transaction` that are still locked, and
2889 * mark the transaction closed.
2890 */
2891 static void files_transaction_cleanup(struct files_ref_store *refs,
2892 struct ref_transaction *transaction)
2893 {
2894 size_t i;
2895 struct files_transaction_backend_data *backend_data =
2896 transaction->backend_data;
2897 struct strbuf err = STRBUF_INIT;
2898
2899 for (i = 0; i < transaction->nr; i++) {
2900 struct ref_update *update = transaction->updates[i];
2901 struct ref_lock *lock = update->backend_data;
2902
2903 if (lock) {
2904 unlock_ref(lock);
2905 try_remove_empty_parents(refs, update->refname,
2906 REMOVE_EMPTY_PARENTS_REF);
2907 update->backend_data = NULL;
2908 }
2909 }
2910
2911 if (backend_data) {
2912 if (backend_data->packed_transaction &&
2913 ref_transaction_abort(backend_data->packed_transaction, &err)) {
2914 error("error aborting transaction: %s", err.buf);
2915 strbuf_release(&err);
2916 }
2917
2918 if (backend_data->packed_refs_locked)
2919 packed_refs_unlock(refs->packed_ref_store);
2920
2921 strmap_clear(&backend_data->ref_locks, 0);
2922
2923 free(backend_data);
2924 }
2925
2926 transaction->state = REF_TRANSACTION_CLOSED;
2927 }
2928
2929 static int files_transaction_prepare(struct ref_store *ref_store,
2930 struct ref_transaction *transaction,
2931 struct strbuf *err)
2932 {
2933 struct files_ref_store *refs =
2934 files_downcast(ref_store, REF_STORE_WRITE,
2935 "ref_transaction_prepare");
2936 size_t i;
2937 int ret = 0;
2938 struct string_list refnames_to_check = STRING_LIST_INIT_DUP;
2939 char *head_ref = NULL;
2940 int head_type;
2941 struct files_transaction_backend_data *backend_data;
2942 struct ref_transaction *packed_transaction = NULL;
2943
2944 assert(err);
2945
2946 if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL)
2947 goto cleanup;
2948 if (!transaction->nr)
2949 goto cleanup;
2950
2951 CALLOC_ARRAY(backend_data, 1);
2952 strmap_init(&backend_data->ref_locks);
2953 transaction->backend_data = backend_data;
2954
2955 /*
2956 * Fail if any of the updates use REF_IS_PRUNING without REF_NO_DEREF.
2957 */
2958 for (i = 0; i < transaction->nr; i++) {
2959 struct ref_update *update = transaction->updates[i];
2960
2961 if ((update->flags & REF_IS_PRUNING) &&
2962 !(update->flags & REF_NO_DEREF))
2963 BUG("REF_IS_PRUNING set without REF_NO_DEREF");
2964 }
2965
2966 /*
2967 * Special hack: If a branch is updated directly and HEAD
2968 * points to it (may happen on the remote side of a push
2969 * for example) then logically the HEAD reflog should be
2970 * updated too.
2971 *
2972 * A generic solution would require reverse symref lookups,
2973 * but finding all symrefs pointing to a given branch would be
2974 * rather costly for this rare event (the direct update of a
2975 * branch) to be worth it. So let's cheat and check with HEAD
2976 * only, which should cover 99% of all usage scenarios (even
2977 * 100% of the default ones).
2978 *
2979 * So if HEAD is a symbolic reference, then record the name of
2980 * the reference that it points to. If we see an update of
2981 * head_ref within the transaction, then split_head_update()
2982 * arranges for the reflog of HEAD to be updated, too.
2983 */
2984 head_ref = refs_resolve_refdup(ref_store, "HEAD",
2985 RESOLVE_REF_NO_RECURSE,
2986 NULL, &head_type);
2987
2988 if (head_ref && !(head_type & REF_ISSYMREF)) {
2989 FREE_AND_NULL(head_ref);
2990 }
2991
2992 /*
2993 * Acquire all locks, verify old values if provided, check
2994 * that new values are valid, and write new values to the
2995 * lockfiles, ready to be activated. Only keep one lockfile
2996 * open at a time to avoid running out of file descriptors.
2997 * Note that lock_ref_for_update() might append more updates
2998 * to the transaction.
2999 */
3000 for (i = 0; i < transaction->nr; i++) {
3001 struct ref_update *update = transaction->updates[i];
3002
3003 ret = lock_ref_for_update(refs, update, i, transaction,
3004 head_ref, &refnames_to_check,
3005 err);
3006 if (ret) {
3007 if (ref_transaction_maybe_set_rejected(transaction, i, ret)) {
3008 strbuf_reset(err);
3009 ret = 0;
3010
3011 continue;
3012 }
3013 goto cleanup;
3014 }
3015
3016 if (update->flags & REF_DELETING &&
3017 !(update->flags & REF_LOG_ONLY) &&
3018 !(update->flags & REF_IS_PRUNING)) {
3019 /*
3020 * This reference has to be deleted from
3021 * packed-refs if it exists there.
3022 */
3023 if (!packed_transaction) {
3024 packed_transaction = ref_store_transaction_begin(
3025 refs->packed_ref_store,
3026 transaction->flags, err);
3027 if (!packed_transaction) {
3028 ret = REF_TRANSACTION_ERROR_GENERIC;
3029 goto cleanup;
3030 }
3031
3032 backend_data->packed_transaction =
3033 packed_transaction;
3034 }
3035
3036 ref_transaction_add_update(
3037 packed_transaction, update->refname,
3038 REF_HAVE_NEW | REF_NO_DEREF,
3039 &update->new_oid, NULL,
3040 NULL, NULL, NULL, NULL);
3041 }
3042 }
3043
3044 /*
3045 * Verify that none of the loose reference that we're about to write
3046 * conflict with any existing packed references. Ideally, we'd do this
3047 * check after the packed-refs are locked so that the file cannot
3048 * change underneath our feet. But introducing such a lock now would
3049 * probably do more harm than good as users rely on there not being a
3050 * global lock with the "files" backend.
3051 *
3052 * Another alternative would be to do the check after the (optional)
3053 * lock, but that would extend the time we spend in the globally-locked
3054 * state.
3055 *
3056 * So instead, we accept the race for now.
3057 */
3058 if (refs_verify_refnames_available(refs->packed_ref_store, &refnames_to_check,
3059 &transaction->refnames, NULL, transaction,
3060 0, err)) {
3061 ret = REF_TRANSACTION_ERROR_NAME_CONFLICT;
3062 goto cleanup;
3063 }
3064
3065 if (packed_transaction) {
3066 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
3067 ret = REF_TRANSACTION_ERROR_GENERIC;
3068 goto cleanup;
3069 }
3070 backend_data->packed_refs_locked = 1;
3071
3072 if (is_packed_transaction_needed(refs->packed_ref_store,
3073 packed_transaction)) {
3074 ret = ref_transaction_prepare(packed_transaction, err);
3075 /*
3076 * A failure during the prepare step will abort
3077 * itself, but not free. Do that now, and disconnect
3078 * from the files_transaction so it does not try to
3079 * abort us when we hit the cleanup code below.
3080 */
3081 if (ret) {
3082 ref_transaction_free(packed_transaction);
3083 backend_data->packed_transaction = NULL;
3084 }
3085 } else {
3086 /*
3087 * We can skip rewriting the `packed-refs`
3088 * file. But we do need to leave it locked, so
3089 * that somebody else doesn't pack a reference
3090 * that we are trying to delete.
3091 *
3092 * We need to disconnect our transaction from
3093 * backend_data, since the abort (whether successful or
3094 * not) will free it.
3095 */
3096 backend_data->packed_transaction = NULL;
3097 if (ref_transaction_abort(packed_transaction, err)) {
3098 ret = REF_TRANSACTION_ERROR_GENERIC;
3099 goto cleanup;
3100 }
3101 }
3102 }
3103
3104 cleanup:
3105 free(head_ref);
3106 string_list_clear(&refnames_to_check, 1);
3107
3108 if (ret)
3109 files_transaction_cleanup(refs, transaction);
3110 else
3111 transaction->state = REF_TRANSACTION_PREPARED;
3112
3113 return ret;
3114 }
3115
3116 static int parse_and_write_reflog(struct files_ref_store *refs,
3117 struct ref_update *update,
3118 struct ref_lock *lock,
3119 struct strbuf *err)
3120 {
3121 struct object_id *old_oid = &lock->old_oid;
3122
3123 if (update->flags & REF_LOG_USE_PROVIDED_OIDS) {
3124 if (!(update->flags & REF_HAVE_OLD) ||
3125 !(update->flags & REF_HAVE_NEW) ||
3126 !(update->flags & REF_LOG_ONLY)) {
3127 strbuf_addf(err, _("trying to write reflog for '%s' "
3128 "with incomplete values"), update->refname);
3129 return REF_TRANSACTION_ERROR_GENERIC;
3130 }
3131
3132 old_oid = &update->old_oid;
3133 }
3134
3135 if (update->new_target) {
3136 /*
3137 * We want to get the resolved OID for the target, to ensure
3138 * that the correct value is added to the reflog.
3139 */
3140 if (!refs_resolve_ref_unsafe(&refs->base, update->new_target,
3141 RESOLVE_REF_READING,
3142 &update->new_oid, NULL)) {
3143 /*
3144 * TODO: currently we skip creating reflogs for dangling
3145 * symref updates. It would be nice to capture these as
3146 * zero-OID updates, though.
3147 */
3148 return 0;
3149 }
3150 }
3151
3152 if (files_log_ref_write(refs, lock->ref_name, old_oid,
3153 &update->new_oid, update->committer_info,
3154 update->msg, update->flags, err)) {
3155 char *old_msg = strbuf_detach(err, NULL);
3156
3157 strbuf_addf(err, "cannot update the ref '%s': %s",
3158 lock->ref_name, old_msg);
3159 free(old_msg);
3160 unlock_ref(lock);
3161 update->backend_data = NULL;
3162 return -1;
3163 }
3164
3165 return 0;
3166 }
3167
3168 static int ref_present(const char *refname, const char *referent UNUSED,
3169 const struct object_id *oid UNUSED,
3170 int flags UNUSED,
3171 void *cb_data)
3172 {
3173 struct string_list *affected_refnames = cb_data;
3174
3175 return string_list_has_string(affected_refnames, refname);
3176 }
3177
3178 static int files_transaction_finish_initial(struct files_ref_store *refs,
3179 struct ref_transaction *transaction,
3180 struct strbuf *err)
3181 {
3182 size_t i;
3183 int ret = 0;
3184 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
3185 struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
3186 struct ref_transaction *packed_transaction = NULL;
3187 struct ref_transaction *loose_transaction = NULL;
3188
3189 assert(err);
3190
3191 if (transaction->state != REF_TRANSACTION_PREPARED)
3192 BUG("commit called for transaction that is not prepared");
3193
3194 /*
3195 * It's really undefined to call this function in an active
3196 * repository or when there are existing references: we are
3197 * only locking and changing packed-refs, so (1) any
3198 * simultaneous processes might try to change a reference at
3199 * the same time we do, and (2) any existing loose versions of
3200 * the references that we are setting would have precedence
3201 * over our values. But some remote helpers create the remote
3202 * "HEAD" and "master" branches before calling this function,
3203 * so here we really only check that none of the references
3204 * that we are creating already exists.
3205 */
3206 if (refs_for_each_rawref(&refs->base, ref_present,
3207 &transaction->refnames))
3208 BUG("initial ref transaction called with existing refs");
3209
3210 packed_transaction = ref_store_transaction_begin(refs->packed_ref_store,
3211 transaction->flags, err);
3212 if (!packed_transaction) {
3213 ret = REF_TRANSACTION_ERROR_GENERIC;
3214 goto cleanup;
3215 }
3216
3217 for (i = 0; i < transaction->nr; i++) {
3218 struct ref_update *update = transaction->updates[i];
3219
3220 if (!(update->flags & REF_LOG_ONLY) &&
3221 (update->flags & REF_HAVE_OLD) &&
3222 !is_null_oid(&update->old_oid))
3223 BUG("initial ref transaction with old_sha1 set");
3224
3225 string_list_append(&refnames_to_check, update->refname);
3226
3227 /*
3228 * packed-refs don't support symbolic refs, root refs and reflogs,
3229 * so we have to queue these references via the loose transaction.
3230 */
3231 if (update->new_target ||
3232 is_root_ref(update->refname) ||
3233 (update->flags & REF_LOG_ONLY)) {
3234 if (!loose_transaction) {
3235 loose_transaction = ref_store_transaction_begin(&refs->base, 0, err);
3236 if (!loose_transaction) {
3237 ret = REF_TRANSACTION_ERROR_GENERIC;
3238 goto cleanup;
3239 }
3240 }
3241
3242 if (update->flags & REF_LOG_ONLY)
3243 ref_transaction_add_update(loose_transaction, update->refname,
3244 update->flags, &update->new_oid,
3245 &update->old_oid, NULL, NULL,
3246 update->committer_info, update->msg);
3247 else
3248 ref_transaction_add_update(loose_transaction, update->refname,
3249 update->flags & ~REF_HAVE_OLD,
3250 update->new_target ? NULL : &update->new_oid, NULL,
3251 update->new_target, NULL, update->committer_info,
3252 NULL);
3253 } else {
3254 ref_transaction_add_update(packed_transaction, update->refname,
3255 update->flags & ~REF_HAVE_OLD,
3256 &update->new_oid, &update->old_oid,
3257 NULL, NULL, update->committer_info, NULL);
3258 }
3259 }
3260
3261 if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
3262 ret = REF_TRANSACTION_ERROR_GENERIC;
3263 goto cleanup;
3264 }
3265
3266 if (refs_verify_refnames_available(&refs->base, &refnames_to_check,
3267 &affected_refnames, NULL, transaction,
3268 1, err)) {
3269 packed_refs_unlock(refs->packed_ref_store);
3270 ret = REF_TRANSACTION_ERROR_NAME_CONFLICT;
3271 goto cleanup;
3272 }
3273
3274 if (ref_transaction_commit(packed_transaction, err)) {
3275 ret = REF_TRANSACTION_ERROR_GENERIC;
3276 goto cleanup;
3277 }
3278 packed_refs_unlock(refs->packed_ref_store);
3279
3280 if (loose_transaction) {
3281 if (ref_transaction_prepare(loose_transaction, err) ||
3282 ref_transaction_commit(loose_transaction, err)) {
3283 ret = REF_TRANSACTION_ERROR_GENERIC;
3284 goto cleanup;
3285 }
3286 }
3287
3288 cleanup:
3289 if (loose_transaction)
3290 ref_transaction_free(loose_transaction);
3291 if (packed_transaction)
3292 ref_transaction_free(packed_transaction);
3293 transaction->state = REF_TRANSACTION_CLOSED;
3294 string_list_clear(&affected_refnames, 0);
3295 string_list_clear(&refnames_to_check, 0);
3296 return ret;
3297 }
3298
3299 static int files_transaction_finish(struct ref_store *ref_store,
3300 struct ref_transaction *transaction,
3301 struct strbuf *err)
3302 {
3303 struct files_ref_store *refs =
3304 files_downcast(ref_store, 0, "ref_transaction_finish");
3305 size_t i;
3306 int ret = 0;
3307 struct strbuf sb = STRBUF_INIT;
3308 struct files_transaction_backend_data *backend_data;
3309 struct ref_transaction *packed_transaction;
3310
3311
3312 assert(err);
3313
3314 if (transaction->flags & REF_TRANSACTION_FLAG_INITIAL)
3315 return files_transaction_finish_initial(refs, transaction, err);
3316 if (!transaction->nr) {
3317 transaction->state = REF_TRANSACTION_CLOSED;
3318 return 0;
3319 }
3320
3321 backend_data = transaction->backend_data;
3322 packed_transaction = backend_data->packed_transaction;
3323
3324 /* Perform updates first so live commits remain referenced */
3325 for (i = 0; i < transaction->nr; i++) {
3326 struct ref_update *update = transaction->updates[i];
3327 struct ref_lock *lock = update->backend_data;
3328
3329 if (update->rejection_err)
3330 continue;
3331
3332 if (update->flags & REF_NEEDS_COMMIT ||
3333 update->flags & REF_LOG_ONLY) {
3334 if (parse_and_write_reflog(refs, update, lock, err)) {
3335 ret = REF_TRANSACTION_ERROR_GENERIC;
3336 goto cleanup;
3337 }
3338 }
3339
3340 /*
3341 * We try creating a symlink; if that succeeds, we continue to the
3342 * next update. If not, we fall back to creating a regular symref.
3343 */
3344 if (update->new_target && refs->prefer_symlink_refs)
3345 /*
3346 * By using the `NOT_CONSTANT()` trick, we can avoid
3347 * errors from `clang`'s `-Wunreachable` logic that would
3348 * report that the `continue` statement is not reachable
3349 * when `NO_SYMLINK_HEAD` is `#define`d.
3350 */
3351 if (NOT_CONSTANT(!create_ref_symlink(lock, update->new_target)))
3352 continue;
3353
3354 if (update->flags & REF_NEEDS_COMMIT) {
3355 clear_loose_ref_cache(refs);
3356 if (commit_ref(lock)) {
3357 strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
3358 unlock_ref(lock);
3359 update->backend_data = NULL;
3360 ret = REF_TRANSACTION_ERROR_GENERIC;
3361 goto cleanup;
3362 }
3363 }
3364 }
3365
3366 /*
3367 * Now that updates are safely completed, we can perform
3368 * deletes. First delete the reflogs of any references that
3369 * will be deleted, since (in the unexpected event of an
3370 * error) leaving a reference without a reflog is less bad
3371 * than leaving a reflog without a reference (the latter is a
3372 * mildly invalid repository state):
3373 */
3374 for (i = 0; i < transaction->nr; i++) {
3375 struct ref_update *update = transaction->updates[i];
3376
3377 if (update->rejection_err)
3378 continue;
3379
3380 if (update->flags & REF_DELETING &&
3381 !(update->flags & REF_LOG_ONLY) &&
3382 !(update->flags & REF_IS_PRUNING)) {
3383 strbuf_reset(&sb);
3384 files_reflog_path(refs, &sb, update->refname);
3385 if (!unlink_or_warn(sb.buf))
3386 try_remove_empty_parents(refs, update->refname,
3387 REMOVE_EMPTY_PARENTS_REFLOG);
3388 }
3389 }
3390
3391 /*
3392 * Perform deletes now that updates are safely completed.
3393 *
3394 * First delete any packed versions of the references, while
3395 * retaining the packed-refs lock:
3396 */
3397 if (packed_transaction) {
3398 ret = ref_transaction_commit(packed_transaction, err);
3399 ref_transaction_free(packed_transaction);
3400 packed_transaction = NULL;
3401 backend_data->packed_transaction = NULL;
3402 if (ret)
3403 goto cleanup;
3404 }
3405
3406 /* Now delete the loose versions of the references: */
3407 for (i = 0; i < transaction->nr; i++) {
3408 struct ref_update *update = transaction->updates[i];
3409 struct ref_lock *lock = update->backend_data;
3410
3411 if (update->rejection_err)
3412 continue;
3413
3414 if (update->flags & REF_DELETING &&
3415 !(update->flags & REF_LOG_ONLY)) {
3416 update->flags |= REF_DELETED_RMDIR;
3417 if (!(update->type & REF_ISPACKED) ||
3418 update->type & REF_ISSYMREF) {
3419 /* It is a loose reference. */
3420 strbuf_reset(&sb);
3421 files_ref_path(refs, &sb, lock->ref_name);
3422 if (unlink_or_msg(sb.buf, err)) {
3423 ret = REF_TRANSACTION_ERROR_GENERIC;
3424 goto cleanup;
3425 }
3426 }
3427 }
3428 }
3429
3430 clear_loose_ref_cache(refs);
3431
3432 cleanup:
3433 files_transaction_cleanup(refs, transaction);
3434
3435 for (i = 0; i < transaction->nr; i++) {
3436 struct ref_update *update = transaction->updates[i];
3437
3438 if (update->flags & REF_DELETED_RMDIR) {
3439 /*
3440 * The reference was deleted. Delete any
3441 * empty parent directories. (Note that this
3442 * can only work because we have already
3443 * removed the lockfile.)
3444 */
3445 try_remove_empty_parents(refs, update->refname,
3446 REMOVE_EMPTY_PARENTS_REF);
3447 }
3448 }
3449
3450 strbuf_release(&sb);
3451 return ret;
3452 }
3453
3454 static int files_transaction_abort(struct ref_store *ref_store,
3455 struct ref_transaction *transaction,
3456 struct strbuf *err UNUSED)
3457 {
3458 struct files_ref_store *refs =
3459 files_downcast(ref_store, 0, "ref_transaction_abort");
3460
3461 files_transaction_cleanup(refs, transaction);
3462 return 0;
3463 }
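/*
 * The three vtable entry points above implement the generic two-phase
 * transaction flow. A rough sketch of how a caller drives them through
 * the public refs API (error handling elided):
 *
 *	struct strbuf err = STRBUF_INIT;
 *	struct ref_transaction *t =
 *		ref_store_transaction_begin(ref_store, 0, &err);
 *
 *	if (!t || queue_updates(t, &err) || ref_transaction_commit(t, &err))
 *		error("%s", err.buf);
 *	ref_transaction_free(t);
 *	strbuf_release(&err);
 *
 * where queue_updates() stands in for whatever ref_transaction_update()
 * calls the caller wants to make. Committing routes through
 * files_transaction_prepare() and files_transaction_finish() above; an
 * explicit ref_transaction_abort() instead of the commit ends up in
 * files_transaction_abort() and also frees the transaction.
 */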
3464
3465 struct expire_reflog_cb {
3466 reflog_expiry_should_prune_fn *should_prune_fn;
3467 void *policy_cb;
3468 FILE *newlog;
3469 struct object_id last_kept_oid;
3470 unsigned int rewrite:1,
3471 dry_run:1;
3472 };
3473
3474 static int expire_reflog_ent(const char *refname UNUSED,
3475 struct object_id *ooid, struct object_id *noid,
3476 const char *email, timestamp_t timestamp, int tz,
3477 const char *message, void *cb_data)
3478 {
3479 struct expire_reflog_cb *cb = cb_data;
3480 reflog_expiry_should_prune_fn *fn = cb->should_prune_fn;
3481
3482 if (cb->rewrite)
3483 ooid = &cb->last_kept_oid;
3484
3485 if (fn(ooid, noid, email, timestamp, tz, message, cb->policy_cb))
3486 return 0;
3487
3488 if (cb->dry_run)
3489 return 0; /* --dry-run */
3490
3491 fprintf(cb->newlog, "%s %s %s %"PRItime" %+05d\t%s", oid_to_hex(ooid),
3492 oid_to_hex(noid), email, timestamp, tz, message);
3493 oidcpy(&cb->last_kept_oid, noid);
3494
3495 return 0;
3496 }
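/*
 * A minimal sketch of a matching should_prune_fn policy, pruning every
 * entry older than a caller-chosen cutoff; the struct and function are
 * hypothetical, and the in-tree expiry policies are more elaborate:
 *
 *	struct prune_by_age { timestamp_t cutoff; };
 *
 *	static int prune_older_than(struct object_id *ooid UNUSED,
 *				    struct object_id *noid UNUSED,
 *				    const char *email UNUSED,
 *				    timestamp_t timestamp, int tz UNUSED,
 *				    const char *message UNUSED, void *cb_data)
 *	{
 *		struct prune_by_age *policy = cb_data;
 *		return timestamp < policy->cutoff;
 *	}
 *
 * A non-zero return value tells expire_reflog_ent() to drop the entry
 * instead of copying it into the new log.
 */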
3497
3498 static int files_reflog_expire(struct ref_store *ref_store,
3499 const char *refname,
3500 unsigned int expire_flags,
3501 reflog_expiry_prepare_fn prepare_fn,
3502 reflog_expiry_should_prune_fn should_prune_fn,
3503 reflog_expiry_cleanup_fn cleanup_fn,
3504 void *policy_cb_data)
3505 {
3506 struct files_ref_store *refs =
3507 files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
3508 struct lock_file reflog_lock = LOCK_INIT;
3509 struct expire_reflog_cb cb;
3510 struct ref_lock *lock;
3511 struct strbuf log_file_sb = STRBUF_INIT;
3512 char *log_file;
3513 int status = 0;
3514 struct strbuf err = STRBUF_INIT;
3515 const struct object_id *oid;
3516
3517 memset(&cb, 0, sizeof(cb));
3518 cb.rewrite = !!(expire_flags & EXPIRE_REFLOGS_REWRITE);
3519 cb.dry_run = !!(expire_flags & EXPIRE_REFLOGS_DRY_RUN);
3520 cb.policy_cb = policy_cb_data;
3521 cb.should_prune_fn = should_prune_fn;
3522
3523 /*
3524 * The reflog file is locked by holding the lock on the
3525 * reference itself, plus we might need to update the
3526 * reference if --updateref was specified:
3527 */
3528 lock = lock_ref_oid_basic(refs, refname, &err);
3529 if (!lock) {
3530 error("cannot lock ref '%s': %s", refname, err.buf);
3531 strbuf_release(&err);
3532 return -1;
3533 }
3534 oid = &lock->old_oid;
3535
3536 /*
3537 * When refs are deleted, their reflog is deleted before the
3538 * ref itself is deleted. This is because there is no separate
3539 * lock for reflog; instead we take a lock on the ref with
3540 * lock_ref_oid_basic().
3541 *
3542 * If a race happens and the reflog doesn't exist after we've
3543 * acquired the lock, that's OK: we've got nothing more to do.
3544 * We were asked to get rid of the reflog, but someone else
3545 * already deleted it! The caller doesn't care who deleted it,
3546 * just that it is gone. So we can return successfully.
3547 */
3548 if (!refs_reflog_exists(ref_store, refname)) {
3549 unlock_ref(lock);
3550 return 0;
3551 }
3552
3553 files_reflog_path(refs, &log_file_sb, refname);
3554 log_file = strbuf_detach(&log_file_sb, NULL);
3555 if (!cb.dry_run) {
3556 /*
3557 * Even though holding $GIT_DIR/logs/$reflog.lock has
3558 * no locking implications, we use the lock_file
3559 * machinery here anyway because it does a lot of the
3560 * work we need, including cleaning up if the program
3561 * exits unexpectedly.
3562 */
3563 if (hold_lock_file_for_update(&reflog_lock, log_file, 0) < 0) {
3564 struct strbuf err = STRBUF_INIT;
3565 unable_to_lock_message(log_file, errno, &err);
3566 error("%s", err.buf);
3567 strbuf_release(&err);
3568 goto failure;
3569 }
3570 cb.newlog = fdopen_lock_file(&reflog_lock, "w");
3571 if (!cb.newlog) {
3572 error("cannot fdopen %s (%s)",
3573 get_lock_file_path(&reflog_lock), strerror(errno));
3574 goto failure;
3575 }
3576 }
3577
3578 (*prepare_fn)(refname, oid, cb.policy_cb);
3579 refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
3580 (*cleanup_fn)(cb.policy_cb);
3581
3582 if (!cb.dry_run) {
3583 /*
3584 * It doesn't make sense to adjust a reference pointed
3585 * to by a symbolic ref based on expiring entries in
3586 * the symbolic reference's reflog. Nor can we update
3587 * a reference if there are no remaining reflog
3588 * entries.
3589 */
3590 int update = 0;
3591
3592 if ((expire_flags & EXPIRE_REFLOGS_UPDATE_REF) &&
3593 !is_null_oid(&cb.last_kept_oid)) {
3594 int type;
3595 const char *ref;
3596
3597 ref = refs_resolve_ref_unsafe(&refs->base, refname,
3598 RESOLVE_REF_NO_RECURSE,
3599 NULL, &type);
3600 update = !!(ref && !(type & REF_ISSYMREF));
3601 }
3602
3603 if (close_lock_file_gently(&reflog_lock)) {
3604 status |= error("couldn't write %s: %s", log_file,
3605 strerror(errno));
3606 rollback_lock_file(&reflog_lock);
3607 } else if (update &&
3608 (write_in_full(get_lock_file_fd(&lock->lk),
3609 oid_to_hex(&cb.last_kept_oid), refs->base.repo->hash_algo->hexsz) < 0 ||
3610 write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
3611 close_ref_gently(lock) < 0)) {
3612 status |= error("couldn't write %s",
3613 get_lock_file_path(&lock->lk));
3614 rollback_lock_file(&reflog_lock);
3615 } else if (commit_lock_file(&reflog_lock)) {
3616 status |= error("unable to write reflog '%s' (%s)",
3617 log_file, strerror(errno));
3618 } else if (update && commit_ref(lock)) {
3619 status |= error("couldn't set %s", lock->ref_name);
3620 }
3621 }
3622 free(log_file);
3623 unlock_ref(lock);
3624 return status;
3625
3626 failure:
3627 rollback_lock_file(&reflog_lock);
3628 free(log_file);
3629 unlock_ref(lock);
3630 return -1;
3631 }
3632
3633 static int files_ref_store_create_on_disk(struct ref_store *ref_store,
3634 int flags,
3635 struct strbuf *err UNUSED)
3636 {
3637 struct files_ref_store *refs =
3638 files_downcast(ref_store, REF_STORE_WRITE, "create");
3639 struct strbuf sb = STRBUF_INIT;
3640
3641 /*
3642 * We need to create a "refs" dir in any case so that older versions of
3643 * Git can tell that this is a repository. This serves two main purposes:
3644 *
3645 * - Clients will know to stop walking the parent-directory chain when
3646 * detecting the Git repository. Otherwise they may end up detecting
3647 * a Git repository in a parent directory instead.
3648 *
3649 * - Instead of failing to detect a repository with unknown reference
3650 * format altogether, old clients will print an error saying that
3651 * they do not understand the reference format extension.
3652 */
3653 strbuf_addf(&sb, "%s/refs", ref_store->gitdir);
3654 safe_create_dir(the_repository, sb.buf, 1);
3655 adjust_shared_perm(the_repository, sb.buf);
3656
3657 /*
3658 * There is no need to create directories for common refs when creating
3659 * a worktree ref store.
3660 */
3661 if (!(flags & REF_STORE_CREATE_ON_DISK_IS_WORKTREE)) {
3662 /*
3663 * Create .git/refs/{heads,tags}
3664 */
3665 strbuf_reset(&sb);
3666 files_ref_path(refs, &sb, "refs/heads");
3667 safe_create_dir(the_repository, sb.buf, 1);
3668
3669 strbuf_reset(&sb);
3670 files_ref_path(refs, &sb, "refs/tags");
3671 safe_create_dir(the_repository, sb.buf, 1);
3672 }
3673
3674 strbuf_release(&sb);
3675 return 0;
3676 }
3677
3678 struct remove_one_root_ref_data {
3679 const char *gitdir;
3680 struct strbuf *err;
3681 };
3682
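/*
 * Callback for for_each_root_ref(): unlink a single root ref (e.g. "HEAD")
 * from the gitdir, reporting failures through data->err.
 */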
3683 static int remove_one_root_ref(const char *refname,
3684 void *cb_data)
3685 {
3686 struct remove_one_root_ref_data *data = cb_data;
3687 struct strbuf buf = STRBUF_INIT;
3688 int ret = 0;
3689
3690 strbuf_addf(&buf, "%s/%s", data->gitdir, refname);
3691
3692 ret = unlink(buf.buf);
3693 if (ret < 0)
3694 strbuf_addf(data->err, "could not delete %s: %s\n",
3695 refname, strerror(errno));
3696
3697 strbuf_release(&buf);
3698 return ret;
3699 }
3700
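/*
 * Remove this backend's data from disk: the "refs/" and "logs/" hierarchies,
 * any root refs stored directly in the gitdir, and whatever the packed
 * backend keeps on disk.
 */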
3701 static int files_ref_store_remove_on_disk(struct ref_store *ref_store,
3702 struct strbuf *err)
3703 {
3704 struct files_ref_store *refs =
3705 files_downcast(ref_store, REF_STORE_WRITE, "remove");
3706 struct remove_one_root_ref_data data = {
3707 .gitdir = refs->base.gitdir,
3708 .err = err,
3709 };
3710 struct strbuf sb = STRBUF_INIT;
3711 int ret = 0;
3712
3713 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
3714 if (remove_dir_recursively(&sb, 0) < 0) {
3715 strbuf_addf(err, "could not delete refs: %s",
3716 strerror(errno));
3717 ret = -1;
3718 }
3719 strbuf_reset(&sb);
3720
3721 strbuf_addf(&sb, "%s/logs", refs->base.gitdir);
3722 if (remove_dir_recursively(&sb, 0) < 0) {
3723 strbuf_addf(err, "could not delete logs: %s",
3724 strerror(errno));
3725 ret = -1;
3726 }
3727 strbuf_reset(&sb);
3728
3729 if (for_each_root_ref(refs, remove_one_root_ref, &data) < 0)
3730 ret = -1;
3731
3732 if (ref_store_remove_on_disk(refs->packed_ref_store, err) < 0)
3733 ret = -1;
3734
3735 strbuf_release(&sb);
3736 return ret;
3737 }
3738
3739 /*
3740 * Refs and reflogs share a unified interface when scanning the whole
3741 * directory. Functions of this type are used as callbacks, invoked for
3742 * each regular file or symlink found in the directory.
3743 */
3744 typedef int (*files_fsck_refs_fn)(struct ref_store *ref_store,
3745 struct fsck_options *o,
3746 const char *refname,
3747 struct dir_iterator *iter);
3748
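/*
 * Check the target of a symref: it must be a root ref or live under
 * "refs/" or "worktrees/", it must have a valid refname format, and a
 * textual symref must be terminated by exactly one LF.
 */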
3749 static int files_fsck_symref_target(struct fsck_options *o,
3750 struct fsck_ref_report *report,
3751 struct strbuf *referent,
3752 unsigned int symbolic_link)
3753 {
3754 int is_referent_root;
3755 char orig_last_byte;
3756 size_t orig_len;
3757 int ret = 0;
3758
3759 orig_len = referent->len;
3760 orig_last_byte = referent->buf[orig_len - 1];
3761 if (!symbolic_link)
3762 strbuf_rtrim(referent);
3763
3764 is_referent_root = is_root_ref(referent->buf);
3765 if (!is_referent_root &&
3766 !starts_with(referent->buf, "refs/") &&
3767 !starts_with(referent->buf, "worktrees/")) {
3768 ret = fsck_report_ref(o, report,
3769 FSCK_MSG_SYMREF_TARGET_IS_NOT_A_REF,
3770 "points to non-ref target '%s'", referent->buf);
3771
3772 }
3773
3774 if (!is_referent_root && check_refname_format(referent->buf, 0)) {
3775 ret = fsck_report_ref(o, report,
3776 FSCK_MSG_BAD_REFERENT_NAME,
3777 "points to invalid refname '%s'", referent->buf);
3778 goto out;
3779 }
3780
3781 if (symbolic_link)
3782 goto out;
3783
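/*
 * The referent was right-trimmed above; comparing against its original
 * length detects formatting problems. For example, a loose symref
 * containing "ref: refs/heads/main\n" is well-formed, whereas a missing
 * final LF or extra trailing whitespace/newlines is reported below.
 */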
3784 if (referent->len == orig_len ||
3785 (referent->len < orig_len && orig_last_byte != '\n')) {
3786 ret = fsck_report_ref(o, report,
3787 FSCK_MSG_REF_MISSING_NEWLINE,
3788 "misses LF at the end");
3789 }
3790
3791 if (referent->len != orig_len && referent->len != orig_len - 1) {
3792 ret = fsck_report_ref(o, report,
3793 FSCK_MSG_TRAILING_REF_CONTENT,
3794 "has trailing whitespaces or newlines");
3795 }
3796
3797 out:
3798 return ret;
3799 }
3800
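/*
 * Check the content of one loose ref. Symbolic links are reported as
 * deprecated and their targets are checked; regular files must parse as
 * loose refs, and refs holding a plain object ID must end with exactly
 * one LF and no trailing garbage.
 */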
3801 static int files_fsck_refs_content(struct ref_store *ref_store,
3802 struct fsck_options *o,
3803 const char *target_name,
3804 struct dir_iterator *iter)
3805 {
3806 struct strbuf ref_content = STRBUF_INIT;
3807 struct strbuf abs_gitdir = STRBUF_INIT;
3808 struct strbuf referent = STRBUF_INIT;
3809 struct fsck_ref_report report = { 0 };
3810 const char *trailing = NULL;
3811 unsigned int type = 0;
3812 int failure_errno = 0;
3813 struct object_id oid;
3814 int ret = 0;
3815
3816 report.path = target_name;
3817
3818 if (S_ISLNK(iter->st.st_mode)) {
3819 const char *relative_referent_path = NULL;
3820
3821 ret = fsck_report_ref(o, &report,
3822 FSCK_MSG_SYMLINK_REF,
3823 "use deprecated symbolic link for symref");
3824
3825 strbuf_add_absolute_path(&abs_gitdir, ref_store->repo->gitdir);
3826 strbuf_normalize_path(&abs_gitdir);
3827 if (!is_dir_sep(abs_gitdir.buf[abs_gitdir.len - 1]))
3828 strbuf_addch(&abs_gitdir, '/');
3829
3830 strbuf_add_real_path(&ref_content, iter->path.buf);
3831 skip_prefix(ref_content.buf, abs_gitdir.buf,
3832 &relative_referent_path);
3833
3834 if (relative_referent_path)
3835 strbuf_addstr(&referent, relative_referent_path);
3836 else
3837 strbuf_addbuf(&referent, &ref_content);
3838
3839 ret |= files_fsck_symref_target(o, &report, &referent, 1);
3840 goto cleanup;
3841 }
3842
3843 if (strbuf_read_file(&ref_content, iter->path.buf, 0) < 0) {
3844 /*
3845 * The ref file may have been removed by a concurrent process; ignore
3846 * this error and continue to the next ref.
3847 */
3848 if (errno == ENOENT)
3849 goto cleanup;
3850
3851 ret = error_errno(_("cannot read ref file '%s'"), iter->path.buf);
3852 goto cleanup;
3853 }
3854
3855 if (parse_loose_ref_contents(ref_store->repo->hash_algo,
3856 ref_content.buf, &oid, &referent,
3857 &type, &trailing, &failure_errno)) {
3858 strbuf_rtrim(&ref_content);
3859 ret = fsck_report_ref(o, &report,
3860 FSCK_MSG_BAD_REF_CONTENT,
3861 "%s", ref_content.buf);
3862 goto cleanup;
3863 }
3864
3865 if (!(type & REF_ISSYMREF)) {
3866 if (!*trailing) {
3867 ret = fsck_report_ref(o, &report,
3868 FSCK_MSG_REF_MISSING_NEWLINE,
3869 "misses LF at the end");
3870 goto cleanup;
3871 }
3872 if (*trailing != '\n' || *(trailing + 1)) {
3873 ret = fsck_report_ref(o, &report,
3874 FSCK_MSG_TRAILING_REF_CONTENT,
3875 "has trailing garbage: '%s'", trailing);
3876 goto cleanup;
3877 }
3878 } else {
3879 ret = files_fsck_symref_target(o, &report, &referent, 0);
3880 goto cleanup;
3881 }
3882
3883 cleanup:
3884 strbuf_release(&ref_content);
3885 strbuf_release(&referent);
3886 strbuf_release(&abs_gitdir);
3887 return ret;
3888 }
3889
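/*
 * Check that the file name of a loose ref is a well-formed refname.
 */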
3890 static int files_fsck_refs_name(struct ref_store *ref_store UNUSED,
3891 struct fsck_options *o,
3892 const char *refname,
3893 struct dir_iterator *iter)
3894 {
3895 struct strbuf sb = STRBUF_INIT;
3896 int ret = 0;
3897
3898 /*
3899 * Ignore files ending with ".lock", since they may be lock files.
3900 * However, a bare ".lock" file is not given this exemption.
3901 */
3902 if (iter->basename[0] != '.' && ends_with(iter->basename, ".lock"))
3903 goto cleanup;
3904
3905 /*
3906 * This works because only the "refs/" hierarchy is scanned here, so one-level root refs such as "HEAD" never reach this check.
3907 */
3908 if (check_refname_format(refname, 0)) {
3909 struct fsck_ref_report report = { 0 };
3910
3911 report.path = refname;
3912 ret = fsck_report_ref(o, &report,
3913 FSCK_MSG_BAD_REF_NAME,
3914 "invalid refname format");
3915 }
3916
3917 cleanup:
3918 strbuf_release(&sb);
3919 return ret;
3920 }
3921
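/*
 * Walk one directory of loose refs (e.g. "refs") below the gitdir and run
 * each function of the NULL-terminated fsck_refs_fn array on every regular
 * file or symlink found in it.
 */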
3922 static int files_fsck_refs_dir(struct ref_store *ref_store,
3923 struct fsck_options *o,
3924 const char *refs_check_dir,
3925 struct worktree *wt,
3926 files_fsck_refs_fn *fsck_refs_fn)
3927 {
3928 struct strbuf refname = STRBUF_INIT;
3929 struct strbuf sb = STRBUF_INIT;
3930 struct dir_iterator *iter;
3931 int iter_status;
3932 int ret = 0;
3933
3934 strbuf_addf(&sb, "%s/%s", ref_store->gitdir, refs_check_dir);
3935
3936 iter = dir_iterator_begin(sb.buf, 0);
3937 if (!iter) {
3938 if (errno == ENOENT && !is_main_worktree(wt))
3939 goto out;
3940
3941 ret = error_errno(_("cannot open directory %s"), sb.buf);
3942 goto out;
3943 }
3944
3945 while ((iter_status = dir_iterator_advance(iter)) == ITER_OK) {
3946 if (S_ISDIR(iter->st.st_mode)) {
3947 continue;
3948 } else if (S_ISREG(iter->st.st_mode) ||
3949 S_ISLNK(iter->st.st_mode)) {
3950 strbuf_reset(&refname);
3951
3952 if (!is_main_worktree(wt))
3953 strbuf_addf(&refname, "worktrees/%s/", wt->id);
3954 strbuf_addf(&refname, "%s/%s", refs_check_dir,
3955 iter->relative_path);
3956
3957 if (o->verbose)
3958 fprintf_ln(stderr, "Checking %s", refname.buf);
3959
3960 for (size_t i = 0; fsck_refs_fn[i]; i++) {
3961 if (fsck_refs_fn[i](ref_store, o, refname.buf, iter))
3962 ret = -1;
3963 }
3964 } else {
3965 struct fsck_ref_report report = { .path = iter->basename };
3966 if (fsck_report_ref(o, &report,
3967 FSCK_MSG_BAD_REF_FILETYPE,
3968 "unexpected file type"))
3969 ret = -1;
3970 }
3971 }
3972
3973 if (iter_status != ITER_DONE)
3974 ret = error(_("failed to iterate over '%s'"), sb.buf);
3975
3976 out:
3977 dir_iterator_free(iter);
3978 strbuf_release(&sb);
3979 strbuf_release(&refname);
3980 return ret;
3981 }
3982
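/*
 * Check all loose refs of this store: first their names, then their content.
 */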
3983 static int files_fsck_refs(struct ref_store *ref_store,
3984 struct fsck_options *o,
3985 struct worktree *wt)
3986 {
3987 files_fsck_refs_fn fsck_refs_fn[] = {
3988 files_fsck_refs_name,
3989 files_fsck_refs_content,
3990 NULL,
3991 };
3992
3993 return files_fsck_refs_dir(ref_store, o, "refs", wt, fsck_refs_fn);
3994 }
3995
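/*
 * Check both halves of the ref store: the loose refs on disk and the
 * packed-refs backend.
 */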
3996 static int files_fsck(struct ref_store *ref_store,
3997 struct fsck_options *o,
3998 struct worktree *wt)
3999 {
4000 struct files_ref_store *refs =
4001 files_downcast(ref_store, REF_STORE_READ, "fsck");
4002
4003 return files_fsck_refs(ref_store, o, wt) |
4004 refs->packed_ref_store->be->fsck(refs->packed_ref_store, o, wt);
4005 }
4006
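/*
 * The vtable that exposes this backend to the generic ref-store code under
 * the name "files". This is the default backend for newly created
 * repositories unless another ref format (such as "reftable") is chosen.
 */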
4007 struct ref_storage_be refs_be_files = {
4008 .name = "files",
4009 .init = files_ref_store_init,
4010 .release = files_ref_store_release,
4011 .create_on_disk = files_ref_store_create_on_disk,
4012 .remove_on_disk = files_ref_store_remove_on_disk,
4013
4014 .transaction_prepare = files_transaction_prepare,
4015 .transaction_finish = files_transaction_finish,
4016 .transaction_abort = files_transaction_abort,
4017
4018 .pack_refs = files_pack_refs,
4019 .optimize = files_optimize,
4020 .rename_ref = files_rename_ref,
4021 .copy_ref = files_copy_ref,
4022
4023 .iterator_begin = files_ref_iterator_begin,
4024 .read_raw_ref = files_read_raw_ref,
4025 .read_symbolic_ref = files_read_symbolic_ref,
4026
4027 .reflog_iterator_begin = files_reflog_iterator_begin,
4028 .for_each_reflog_ent = files_for_each_reflog_ent,
4029 .for_each_reflog_ent_reverse = files_for_each_reflog_ent_reverse,
4030 .reflog_exists = files_reflog_exists,
4031 .create_reflog = files_create_reflog,
4032 .delete_reflog = files_delete_reflog,
4033 .reflog_expire = files_reflog_expire,
4034
4035 .fsck = files_fsck,
4036 };