refs/reftable-backend.c
1 #define USE_THE_REPOSITORY_VARIABLE
2
3 #include "../git-compat-util.h"
4 #include "../abspath.h"
5 #include "../chdir-notify.h"
6 #include "../config.h"
7 #include "../dir.h"
8 #include "../environment.h"
9 #include "../gettext.h"
10 #include "../hash.h"
11 #include "../hex.h"
12 #include "../iterator.h"
13 #include "../ident.h"
14 #include "../lockfile.h"
15 #include "../object.h"
16 #include "../path.h"
17 #include "../refs.h"
18 #include "../reftable/reftable-basics.h"
19 #include "../reftable/reftable-stack.h"
20 #include "../reftable/reftable-record.h"
21 #include "../reftable/reftable-error.h"
22 #include "../reftable/reftable-iterator.h"
23 #include "../repo-settings.h"
24 #include "../setup.h"
25 #include "../strmap.h"
26 #include "../trace2.h"
27 #include "../write-or-die.h"
28 #include "parse.h"
29 #include "refs-internal.h"
30
31 /*
32 * Used as a flag in ref_update::flags when the ref_update was via an
33 * update to HEAD.
34 */
35 #define REF_UPDATE_VIA_HEAD (1 << 8)
36
37 struct reftable_backend {
38 struct reftable_stack *stack;
39 struct reftable_iterator it;
40 };
41
42 static void reftable_backend_on_reload(void *payload)
43 {
44 struct reftable_backend *be = payload;
45 reftable_iterator_destroy(&be->it);
46 }
47
48 static int reftable_backend_init(struct reftable_backend *be,
49 const char *path,
50 const struct reftable_write_options *_opts)
51 {
52 struct reftable_write_options opts = *_opts;
53 opts.on_reload = reftable_backend_on_reload;
54 opts.on_reload_payload = be;
55 return reftable_new_stack(&be->stack, path, &opts);
56 }
57
58 static void reftable_backend_release(struct reftable_backend *be)
59 {
60 reftable_stack_destroy(be->stack);
61 be->stack = NULL;
62 reftable_iterator_destroy(&be->it);
63 }
64
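/*
 * Read a single reference from the backend via its (lazily initialized,
 * reused) ref iterator. As the callers below rely on: a return value of 0
 * means the reference was found, a positive value means it does not exist,
 * and a negative value is a reftable error code.
 */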
65 static int reftable_backend_read_ref(struct reftable_backend *be,
66 const char *refname,
67 struct object_id *oid,
68 struct strbuf *referent,
69 unsigned int *type)
70 {
71 struct reftable_ref_record ref = {0};
72 int ret;
73
74 if (!be->it.ops) {
75 ret = reftable_stack_init_ref_iterator(be->stack, &be->it);
76 if (ret)
77 goto done;
78 }
79
80 ret = reftable_iterator_seek_ref(&be->it, refname);
81 if (ret)
82 goto done;
83
84 ret = reftable_iterator_next_ref(&be->it, &ref);
85 if (ret)
86 goto done;
87
88 if (strcmp(ref.refname, refname)) {
89 ret = 1;
90 goto done;
91 }
92
93 if (ref.value_type == REFTABLE_REF_SYMREF) {
94 strbuf_reset(referent);
95 strbuf_addstr(referent, ref.value.symref);
96 *type |= REF_ISSYMREF;
97 } else if (reftable_ref_record_val1(&ref)) {
98 unsigned int hash_id;
99
100 switch (reftable_stack_hash_id(be->stack)) {
101 case REFTABLE_HASH_SHA1:
102 hash_id = GIT_HASH_SHA1;
103 break;
104 case REFTABLE_HASH_SHA256:
105 hash_id = GIT_HASH_SHA256;
106 break;
107 default:
108 BUG("unhandled hash ID %d", reftable_stack_hash_id(be->stack));
109 }
110
111 oidread(oid, reftable_ref_record_val1(&ref),
112 &hash_algos[hash_id]);
113 } else {
114 /* We got a tombstone, which should not happen. */
115 BUG("unhandled reference value type %d", ref.value_type);
116 }
117
118 done:
119 assert(ret != REFTABLE_API_ERROR);
120 reftable_ref_record_release(&ref);
121 return ret;
122 }
123
124 struct reftable_ref_store {
125 struct ref_store base;
126
127 /*
128 * The main backend refers to the common dir and thus contains common
129 * refs as well as refs of the main repository.
130 */
131 struct reftable_backend main_backend;
132 /*
133 * The worktree backend refers to the gitdir in case the refdb is opened
134 * via a worktree. It thus contains the per-worktree refs.
135 */
136 struct reftable_backend worktree_backend;
137 /*
138 * Map of worktree backends by their respective worktree names. The map
139 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
140 */
141 struct strmap worktree_backends;
142 struct reftable_write_options write_options;
143
144 unsigned int store_flags;
145 enum log_refs_config log_all_ref_updates;
146 int err;
147 };
148
149 /*
150 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
151 * reftable_ref_store. required_flags is compared with ref_store's store_flags
152 * to ensure the ref_store has all required capabilities. "caller" is used in
153 * any necessary error messages.
154 */
155 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
156 unsigned int required_flags,
157 const char *caller)
158 {
159 struct reftable_ref_store *refs;
160
161 if (ref_store->be != &refs_be_reftable)
162 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
163 ref_store->be->name, caller);
164
165 refs = (struct reftable_ref_store *)ref_store;
166
167 if ((refs->store_flags & required_flags) != required_flags)
168 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
169 caller, required_flags, refs->store_flags);
170
171 return refs;
172 }
173
174 /*
175 * Some refs are global to the repository (refs/heads/{*}), while others are
176 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
177 * multiple separate databases (ie. multiple reftable/ directories), one for
178 * the shared refs, one for the current worktree refs, and one for each
179 * additional worktree. For reading, we merge the view of both the shared and
180 * the current worktree's refs, when necessary.
181 *
182 * This function also optionally assigns the rewritten reference name that is
183 * local to the stack. This translation is required when using worktree refs
184 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
185 * those references in their normalized form.
186 */
187 static int backend_for(struct reftable_backend **out,
188 struct reftable_ref_store *store,
189 const char *refname,
190 const char **rewritten_ref,
191 int reload)
192 {
193 struct reftable_backend *be;
194 const char *wtname;
195 int wtname_len;
196
197 if (!refname) {
198 be = &store->main_backend;
199 goto out;
200 }
201
202 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
203 case REF_WORKTREE_OTHER: {
204 static struct strbuf wtname_buf = STRBUF_INIT;
205 struct strbuf wt_dir = STRBUF_INIT;
206
207 /*
208 * We're using a static buffer here so that we don't need to
209 * allocate the worktree name whenever we look up a reference.
210 * This could be avoided if the strmap interface knew how to
211 * handle keys with a length.
212 */
213 strbuf_reset(&wtname_buf);
214 strbuf_add(&wtname_buf, wtname, wtname_len);
215
216 /*
217 * There is an edge case here: when the worktree references the
218 * current worktree, then we set up the stack once via
219 * `worktree_backends` and once via `worktree_backend`. This is
220 * wasteful, but in the reading case it shouldn't matter. And
221 * in the writing case we would notice that the stack is locked
222 * already and error out when trying to write a reference via
223 * both stacks.
224 */
225 be = strmap_get(&store->worktree_backends, wtname_buf.buf);
226 if (!be) {
227 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
228 store->base.repo->commondir, wtname_buf.buf);
229
230 CALLOC_ARRAY(be, 1);
231 store->err = reftable_backend_init(be, wt_dir.buf,
232 &store->write_options);
233 assert(store->err != REFTABLE_API_ERROR);
234
235 strmap_put(&store->worktree_backends, wtname_buf.buf, be);
236 }
237
238 strbuf_release(&wt_dir);
239 goto out;
240 }
241 case REF_WORKTREE_CURRENT:
242 /*
243 * If there is no worktree stack then we're currently in the
244 * main worktree. We thus return the main stack in that case.
245 */
246 if (!store->worktree_backend.stack)
247 be = &store->main_backend;
248 else
249 be = &store->worktree_backend;
250 goto out;
251 case REF_WORKTREE_MAIN:
252 case REF_WORKTREE_SHARED:
253 be = &store->main_backend;
254 goto out;
255 default:
256 BUG("unhandled worktree reference type");
257 }
258
259 out:
260 if (reload) {
261 int ret = reftable_stack_reload(be->stack);
262 if (ret)
263 return ret;
264 }
265 *out = be;
266
267 return 0;
268 }
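/*
 * A sketch of the mapping implemented by backend_for() above, assuming the
 * store was opened from a linked worktree and another worktree named "wt1"
 * exists (paths are illustrative):
 *
 *   refs/heads/main      -> <commondir>/reftable                  (main_backend)
 *   HEAD, refs/bisect/*  -> <gitdir>/reftable                     (worktree_backend)
 *   worktrees/wt1/HEAD   -> <commondir>/worktrees/wt1/reftable    (worktree_backends)
 *   main-worktree/HEAD   -> <commondir>/reftable                  (main_backend)
 *
 * For "worktrees/wt1/HEAD" the rewritten reference name is "HEAD", which is
 * the normalized form under which the per-worktree stack stores it.
 */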
269
270 static int should_write_log(struct reftable_ref_store *refs, const char *refname)
271 {
272 enum log_refs_config log_refs_cfg = refs->log_all_ref_updates;
273 if (log_refs_cfg == LOG_REFS_UNSET)
274 log_refs_cfg = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
275
276 switch (log_refs_cfg) {
277 case LOG_REFS_NONE:
278 return refs_reflog_exists(&refs->base, refname);
279 case LOG_REFS_ALWAYS:
280 return 1;
281 case LOG_REFS_NORMAL:
282 if (should_autocreate_reflog(log_refs_cfg, refname))
283 return 1;
284 return refs_reflog_exists(&refs->base, refname);
285 default:
286 BUG("unhandled core.logAllRefUpdates value %d", log_refs_cfg);
287 }
288 }
289
290 static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
291 {
292 const char *tz_begin;
293 int sign = 1;
294
295 reftable_log_record_release(log);
296 log->value_type = REFTABLE_LOG_UPDATE;
297 log->value.update.name =
298 xstrndup(split->name_begin, split->name_end - split->name_begin);
299 log->value.update.email =
300 xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
301 log->value.update.time = atol(split->date_begin);
302
303 tz_begin = split->tz_begin;
304 if (*tz_begin == '-') {
305 sign = -1;
306 tz_begin++;
307 }
308 if (*tz_begin == '+') {
309 sign = 1;
310 tz_begin++;
311 }
312
313 log->value.update.tz_offset = sign * atoi(tz_begin);
314 }
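/*
 * A worked example with illustrative values: for the committer line
 * "A U Thor <author@example.com> 1234567890 -0700", the ident_split yields
 * name "A U Thor", email "author@example.com" and date "1234567890", so the
 * code above stores time 1234567890, sign -1 and tz_offset -700.
 */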
315
316 static int reftable_be_config(const char *var, const char *value,
317 const struct config_context *ctx,
318 void *_opts)
319 {
320 struct reftable_write_options *opts = _opts;
321
322 if (!strcmp(var, "reftable.blocksize")) {
323 unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
324 if (block_size > 16777215)
325 die("reftable block size cannot exceed 16MB");
326 opts->block_size = block_size;
327 } else if (!strcmp(var, "reftable.restartinterval")) {
328 unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
329 if (restart_interval > UINT16_MAX)
330 die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX);
331 opts->restart_interval = restart_interval;
332 } else if (!strcmp(var, "reftable.indexobjects")) {
333 opts->skip_index_objects = !git_config_bool(var, value);
334 } else if (!strcmp(var, "reftable.geometricfactor")) {
335 unsigned long factor = git_config_ulong(var, value, ctx->kvi);
336 if (factor > UINT8_MAX)
337 die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
338 opts->auto_compaction_factor = factor;
339 } else if (!strcmp(var, "reftable.locktimeout")) {
340 int64_t lock_timeout = git_config_int64(var, value, ctx->kvi);
341 if (lock_timeout > LONG_MAX)
342 die("reftable lock timeout cannot exceed %"PRIdMAX, (intmax_t)LONG_MAX);
343 if (lock_timeout < 0 && lock_timeout != -1)
344 die("reftable lock timeout does not support negative values other than -1");
345 opts->lock_timeout_ms = lock_timeout;
346 }
347
348 return 0;
349 }
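/*
 * For illustration, the keys handled above could be configured as follows
 * (the values are arbitrary examples, not recommendations):
 *
 *   [reftable]
 *       blockSize = 8192        # bytes, limited to 16777215
 *       restartInterval = 32    # at most UINT16_MAX
 *       indexObjects = true
 *       geometricFactor = 2     # at most UINT8_MAX
 *       lockTimeout = 500       # milliseconds; -1 is the only allowed negative value
 */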
350
351 static int reftable_be_fsync(int fd)
352 {
353 return fsync_component(FSYNC_COMPONENT_REFERENCE, fd);
354 }
355
356 static struct ref_store *reftable_be_init(struct repository *repo,
357 const char *gitdir,
358 unsigned int store_flags)
359 {
360 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
361 struct strbuf path = STRBUF_INIT;
362 int is_worktree;
363 mode_t mask;
364
365 mask = umask(0);
366 umask(mask);
367
368 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
369 strmap_init(&refs->worktree_backends);
370 refs->store_flags = store_flags;
371 refs->log_all_ref_updates = repo_settings_get_log_all_ref_updates(repo);
372
373 switch (repo->hash_algo->format_id) {
374 case GIT_SHA1_FORMAT_ID:
375 refs->write_options.hash_id = REFTABLE_HASH_SHA1;
376 break;
377 case GIT_SHA256_FORMAT_ID:
378 refs->write_options.hash_id = REFTABLE_HASH_SHA256;
379 break;
380 default:
381 BUG("unknown hash algorithm %d", repo->hash_algo->format_id);
382 }
383 refs->write_options.default_permissions = calc_shared_perm(the_repository, 0666 & ~mask);
384 refs->write_options.disable_auto_compact =
385 !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
386 refs->write_options.lock_timeout_ms = 100;
387 refs->write_options.fsync = reftable_be_fsync;
388
389 git_config(reftable_be_config, &refs->write_options);
390
391 /*
392 * It is somewhat unfortunate that we have to mirror the default block
393 * size of the reftable library here. But given that the write options
394 * wouldn't be updated by the library here, and given that we require
395 * the proper block size to trim reflog messages so that they fit, we
396 * must set up a proper value here.
397 */
398 if (!refs->write_options.block_size)
399 refs->write_options.block_size = 4096;
400
401 /*
402 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
403 * This stack contains both the shared and the main worktree refs.
404 *
405 * Note that we don't try to resolve the path in case we have a
406 * worktree because `get_common_dir_noenv()` already does it for us.
407 */
408 is_worktree = get_common_dir_noenv(&path, gitdir);
409 if (!is_worktree) {
410 strbuf_reset(&path);
411 strbuf_realpath(&path, gitdir, 0);
412 }
413 strbuf_addstr(&path, "/reftable");
414 refs->err = reftable_backend_init(&refs->main_backend, path.buf,
415 &refs->write_options);
416 if (refs->err)
417 goto done;
418
419 /*
420 * If we're in a worktree we also need to set up the worktree reftable
421 * stack that is contained in the per-worktree GIT_DIR.
422 *
423 * Ideally, we would also add the stack to our worktree stack map. But
424 * we have no way to figure out the worktree name here and thus can't
425 * do it efficiently.
426 */
427 if (is_worktree) {
428 strbuf_reset(&path);
429 strbuf_addf(&path, "%s/reftable", gitdir);
430
431 refs->err = reftable_backend_init(&refs->worktree_backend, path.buf,
432 &refs->write_options);
433 if (refs->err)
434 goto done;
435 }
436
437 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
438
439 done:
440 assert(refs->err != REFTABLE_API_ERROR);
441 strbuf_release(&path);
442 return &refs->base;
443 }
444
445 static void reftable_be_release(struct ref_store *ref_store)
446 {
447 struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
448 struct strmap_entry *entry;
449 struct hashmap_iter iter;
450
451 if (refs->main_backend.stack)
452 reftable_backend_release(&refs->main_backend);
453 if (refs->worktree_backend.stack)
454 reftable_backend_release(&refs->worktree_backend);
455
456 strmap_for_each_entry(&refs->worktree_backends, &iter, entry) {
457 struct reftable_backend *be = entry->value;
458 reftable_backend_release(be);
459 free(be);
460 }
461 strmap_clear(&refs->worktree_backends, 0);
462 }
463
464 static int reftable_be_create_on_disk(struct ref_store *ref_store,
465 int flags UNUSED,
466 struct strbuf *err UNUSED)
467 {
468 struct reftable_ref_store *refs =
469 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
470 struct strbuf sb = STRBUF_INIT;
471
472 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
473 safe_create_dir(the_repository, sb.buf, 1);
474 strbuf_reset(&sb);
475
476 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
477 write_file(sb.buf, "ref: refs/heads/.invalid");
478 adjust_shared_perm(the_repository, sb.buf);
479 strbuf_reset(&sb);
480
481 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
482 safe_create_dir(the_repository, sb.buf, 1);
483 strbuf_reset(&sb);
484
485 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
486 write_file(sb.buf, "this repository uses the reftable format");
487 adjust_shared_perm(the_repository, sb.buf);
488
489 strbuf_release(&sb);
490 return 0;
491 }
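/*
 * The writes above result in a layout along these lines (sketch):
 *
 *   $GIT_DIR/HEAD        - stub "ref: refs/heads/.invalid", kept for tools
 *                          that expect this file to exist
 *   $GIT_DIR/refs/heads  - a regular file carrying a notice, not a directory
 *   $GIT_DIR/reftable/   - the directory that holds the actual reftable stack
 */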
492
493 static int reftable_be_remove_on_disk(struct ref_store *ref_store,
494 struct strbuf *err)
495 {
496 struct reftable_ref_store *refs =
497 reftable_be_downcast(ref_store, REF_STORE_WRITE, "remove");
498 struct strbuf sb = STRBUF_INIT;
499 int ret = 0;
500
501 /*
502 * Release the ref store such that all stacks are closed. This is
503 * required so that the "tables.list" file is not open anymore, which
504 * would otherwise make it impossible to remove the file on Windows.
505 */
506 reftable_be_release(ref_store);
507
508 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
509 if (remove_dir_recursively(&sb, 0) < 0) {
510 strbuf_addf(err, "could not delete reftables: %s",
511 strerror(errno));
512 ret = -1;
513 }
514 strbuf_reset(&sb);
515
516 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
517 if (unlink(sb.buf) < 0) {
518 strbuf_addf(err, "could not delete stub HEAD: %s",
519 strerror(errno));
520 ret = -1;
521 }
522 strbuf_reset(&sb);
523
524 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
525 if (unlink(sb.buf) < 0) {
526 strbuf_addf(err, "could not delete stub heads: %s",
527 strerror(errno));
528 ret = -1;
529 }
530 strbuf_reset(&sb);
531
532 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
533 if (rmdir(sb.buf) < 0) {
534 strbuf_addf(err, "could not delete refs directory: %s",
535 strerror(errno));
536 ret = -1;
537 }
538
539 strbuf_release(&sb);
540 return ret;
541 }
542
543 struct reftable_ref_iterator {
544 struct ref_iterator base;
545 struct reftable_ref_store *refs;
546 struct reftable_iterator iter;
547 struct reftable_ref_record ref;
548 struct object_id oid;
549
550 char *prefix;
551 size_t prefix_len;
552 char **exclude_patterns;
553 size_t exclude_patterns_index;
554 size_t exclude_patterns_strlen;
555 unsigned int flags;
556 int err;
557 };
558
559 /*
560 * Handle exclude patterns. Returns `1` to tell the caller that the current
561 * reference shall not be shown, or `0` to indicate that it should be
562 * shown.
563 */
564 static int should_exclude_current_ref(struct reftable_ref_iterator *iter)
565 {
566 while (iter->exclude_patterns[iter->exclude_patterns_index]) {
567 const char *pattern = iter->exclude_patterns[iter->exclude_patterns_index];
568 char *ref_after_pattern;
569 int cmp;
570
571 /*
572 * Lazily cache the pattern length so that we don't have to
573 * recompute it every time this function is called.
574 */
575 if (!iter->exclude_patterns_strlen)
576 iter->exclude_patterns_strlen = strlen(pattern);
577
578 /*
579 * When the reference name is lexicographically bigger than the
580 * current exclude pattern we know that it won't ever match any
581 * of the following references, either. We thus advance to the
582 * next pattern and re-check whether it matches.
583 *
584 * Otherwise, if it's smaller, then we do not have a match and
585 * thus want to show the current reference.
586 */
587 cmp = strncmp(iter->ref.refname, pattern,
588 iter->exclude_patterns_strlen);
589 if (cmp > 0) {
590 iter->exclude_patterns_index++;
591 iter->exclude_patterns_strlen = 0;
592 continue;
593 }
594 if (cmp < 0)
595 return 0;
596
597 /*
598 * The reference shares a prefix with the exclude pattern and
599 * shall thus be omitted. We skip all references that match the
600 * pattern by seeking to the first reference after the block of
601 * matches.
602 *
603 * This is done by appending the highest possible character to
604 * the pattern. Consequently, all references that have the
605 * pattern as prefix and whose suffix starts with anything in
606 * the range [0x00, 0xfe] are skipped. And given that 0xff is a
607 * non-printable character that shouldn't ever be in a ref name,
608 * we'd not yield any such record, either.
609 *
610 * Note that the seeked-to reference may also be excluded. This
611 * is not handled here though, but the caller is expected to
612 * loop and re-verify the next reference for us.
613 */
614 ref_after_pattern = xstrfmt("%s%c", pattern, 0xff);
615 iter->err = reftable_iterator_seek_ref(&iter->iter, ref_after_pattern);
616 iter->exclude_patterns_index++;
617 iter->exclude_patterns_strlen = 0;
618 trace2_counter_add(TRACE2_COUNTER_ID_REFTABLE_RESEEKS, 1);
619
620 free(ref_after_pattern);
621 return 1;
622 }
623
624 return 0;
625 }
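/*
 * An example of the re-seek above, with hypothetical refs: given the exclude
 * pattern "refs/heads/foo/" and refs "refs/heads/foo/a", "refs/heads/foo/b"
 * and "refs/heads/fop", hitting "refs/heads/foo/a" seeks the iterator to
 * "refs/heads/foo/\xff", so the next record yielded is "refs/heads/fop" and
 * the excluded block is skipped without iterating through it.
 */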
626
627 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
628 {
629 struct reftable_ref_iterator *iter =
630 (struct reftable_ref_iterator *)ref_iterator;
631 struct reftable_ref_store *refs = iter->refs;
632 const char *referent = NULL;
633
634 while (!iter->err) {
635 int flags = 0;
636
637 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
638 if (iter->err)
639 break;
640
641 /*
642 * The files backend only lists references contained in "refs/" unless
643 * the root refs are to be included. We emulate the same behaviour here.
644 */
645 if (!starts_with(iter->ref.refname, "refs/") &&
646 !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
647 is_root_ref(iter->ref.refname))) {
648 continue;
649 }
650
651 if (iter->prefix_len &&
652 strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
653 iter->err = 1;
654 break;
655 }
656
657 if (iter->exclude_patterns && should_exclude_current_ref(iter))
658 continue;
659
660 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
661 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
662 REF_WORKTREE_CURRENT)
663 continue;
664
665 switch (iter->ref.value_type) {
666 case REFTABLE_REF_VAL1:
667 oidread(&iter->oid, iter->ref.value.val1,
668 refs->base.repo->hash_algo);
669 break;
670 case REFTABLE_REF_VAL2:
671 oidread(&iter->oid, iter->ref.value.val2.value,
672 refs->base.repo->hash_algo);
673 break;
674 case REFTABLE_REF_SYMREF:
675 referent = refs_resolve_ref_unsafe(&iter->refs->base,
676 iter->ref.refname,
677 RESOLVE_REF_READING,
678 &iter->oid, &flags);
679 if (!referent)
680 oidclr(&iter->oid, refs->base.repo->hash_algo);
681 break;
682 default:
683 BUG("unhandled reference value type %d", iter->ref.value_type);
684 }
685
686 if (is_null_oid(&iter->oid))
687 flags |= REF_ISBROKEN;
688
689 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
690 if (!refname_is_safe(iter->ref.refname))
691 die(_("refname is dangerous: %s"), iter->ref.refname);
692 oidclr(&iter->oid, refs->base.repo->hash_algo);
693 flags |= REF_BAD_NAME | REF_ISBROKEN;
694 }
695
696 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
697 flags & REF_ISSYMREF &&
698 flags & REF_ISBROKEN)
699 continue;
700
701 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
702 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
703 &iter->oid, flags))
704 continue;
705
706 iter->base.refname = iter->ref.refname;
707 iter->base.referent = referent;
708 iter->base.oid = &iter->oid;
709 iter->base.flags = flags;
710
711 break;
712 }
713
714 if (iter->err > 0)
715 return ITER_DONE;
716 if (iter->err < 0)
717 return ITER_ERROR;
718 return ITER_OK;
719 }
720
721 static int reftable_ref_iterator_seek(struct ref_iterator *ref_iterator,
722 const char *prefix)
723 {
724 struct reftable_ref_iterator *iter =
725 (struct reftable_ref_iterator *)ref_iterator;
726
727 free(iter->prefix);
728 iter->prefix = xstrdup_or_null(prefix);
729 iter->prefix_len = prefix ? strlen(prefix) : 0;
730 iter->err = reftable_iterator_seek_ref(&iter->iter, prefix);
731
732 return iter->err;
733 }
734
735 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
736 struct object_id *peeled)
737 {
738 struct reftable_ref_iterator *iter =
739 (struct reftable_ref_iterator *)ref_iterator;
740
741 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
742 oidread(peeled, iter->ref.value.val2.target_value,
743 iter->refs->base.repo->hash_algo);
744 return 0;
745 }
746
747 return -1;
748 }
749
750 static void reftable_ref_iterator_release(struct ref_iterator *ref_iterator)
751 {
752 struct reftable_ref_iterator *iter =
753 (struct reftable_ref_iterator *)ref_iterator;
754 reftable_ref_record_release(&iter->ref);
755 reftable_iterator_destroy(&iter->iter);
756 if (iter->exclude_patterns) {
757 for (size_t i = 0; iter->exclude_patterns[i]; i++)
758 free(iter->exclude_patterns[i]);
759 free(iter->exclude_patterns);
760 }
761 free(iter->prefix);
762 }
763
764 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
765 .advance = reftable_ref_iterator_advance,
766 .seek = reftable_ref_iterator_seek,
767 .peel = reftable_ref_iterator_peel,
768 .release = reftable_ref_iterator_release,
769 };
770
771 static int qsort_strcmp(const void *va, const void *vb)
772 {
773 const char *a = *(const char **)va;
774 const char *b = *(const char **)vb;
775 return strcmp(a, b);
776 }
777
778 static char **filter_exclude_patterns(const char **exclude_patterns)
779 {
780 size_t filtered_size = 0, filtered_alloc = 0;
781 char **filtered = NULL;
782
783 if (!exclude_patterns)
784 return NULL;
785
786 for (size_t i = 0; ; i++) {
787 const char *exclude_pattern = exclude_patterns[i];
788 int has_glob = 0;
789
790 if (!exclude_pattern)
791 break;
792
793 for (const char *p = exclude_pattern; *p; p++) {
794 has_glob = is_glob_special(*p);
795 if (has_glob)
796 break;
797 }
798 if (has_glob)
799 continue;
800
801 ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
802 filtered[filtered_size++] = xstrdup(exclude_pattern);
803 }
804
805 if (filtered_size) {
806 QSORT(filtered, filtered_size, qsort_strcmp);
807 ALLOC_GROW(filtered, filtered_size + 1, filtered_alloc);
808 filtered[filtered_size++] = NULL;
809 }
810
811 return filtered;
812 }
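/*
 * For example, with the illustrative input {"refs/tags/", "refs/heads/*",
 * "refs/hidden/", NULL} the result is {"refs/hidden/", "refs/tags/", NULL}:
 * the globbing pattern is dropped because the prefix-based skipping in
 * should_exclude_current_ref() cannot handle it, and the remaining patterns
 * are sorted so that they can be walked in lockstep with the sorted refs.
 */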
813
814 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
815 struct reftable_stack *stack,
816 const char *prefix,
817 const char **exclude_patterns,
818 int flags)
819 {
820 struct reftable_ref_iterator *iter;
821 int ret;
822
823 iter = xcalloc(1, sizeof(*iter));
824 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
825 iter->base.oid = &iter->oid;
826 iter->flags = flags;
827 iter->refs = refs;
828 iter->exclude_patterns = filter_exclude_patterns(exclude_patterns);
829
830 ret = refs->err;
831 if (ret)
832 goto done;
833
834 ret = reftable_stack_reload(stack);
835 if (ret)
836 goto done;
837
838 ret = reftable_stack_init_ref_iterator(stack, &iter->iter);
839 if (ret)
840 goto done;
841
842 ret = reftable_ref_iterator_seek(&iter->base, prefix);
843 if (ret)
844 goto done;
845
846 done:
847 iter->err = ret;
848 return iter;
849 }
850
851 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
852 const char *prefix,
853 const char **exclude_patterns,
854 unsigned int flags)
855 {
856 struct reftable_ref_iterator *main_iter, *worktree_iter;
857 struct reftable_ref_store *refs;
858 unsigned int required_flags = REF_STORE_READ;
859
860 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
861 required_flags |= REF_STORE_ODB;
862 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
863
864 main_iter = ref_iterator_for_stack(refs, refs->main_backend.stack, prefix,
865 exclude_patterns, flags);
866
867 /*
868 * The worktree stack is only set when we're in an actual worktree
869 * right now. If we aren't, then we return the common reftable
870 * iterator, only.
871 */
872 if (!refs->worktree_backend.stack)
873 return &main_iter->base;
874
875 /*
876 * Otherwise we merge both the common and the per-worktree refs into a
877 * single iterator.
878 */
879 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_backend.stack, prefix,
880 exclude_patterns, flags);
881 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
882 ref_iterator_select, NULL);
883 }
884
885 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
886 const char *refname,
887 struct object_id *oid,
888 struct strbuf *referent,
889 unsigned int *type,
890 int *failure_errno)
891 {
892 struct reftable_ref_store *refs =
893 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
894 struct reftable_backend *be;
895 int ret;
896
897 if (refs->err < 0)
898 return refs->err;
899
900 ret = backend_for(&be, refs, refname, &refname, 1);
901 if (ret)
902 return ret;
903
904 ret = reftable_backend_read_ref(be, refname, oid, referent, type);
905 if (ret < 0)
906 return ret;
907 if (ret > 0) {
908 *failure_errno = ENOENT;
909 return -1;
910 }
911
912 return 0;
913 }
914
915 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
916 const char *refname,
917 struct strbuf *referent)
918 {
919 struct reftable_ref_store *refs =
920 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
921 struct reftable_backend *be;
922 struct object_id oid;
923 unsigned int type = 0;
924 int ret;
925
926 ret = backend_for(&be, refs, refname, &refname, 1);
927 if (ret)
928 return ret;
929
930 ret = reftable_backend_read_ref(be, refname, &oid, referent, &type);
931 if (ret)
932 ret = -1;
933 else if (type == REF_ISSYMREF)
934 ; /* happy */
935 else
936 ret = NOT_A_SYMREF;
937 return ret;
938 }
939
940 struct reftable_transaction_update {
941 struct ref_update *update;
942 struct object_id current_oid;
943 };
944
945 struct write_transaction_table_arg {
946 struct reftable_ref_store *refs;
947 struct reftable_backend *be;
948 struct reftable_addition *addition;
949 struct reftable_transaction_update *updates;
950 size_t updates_nr;
951 size_t updates_alloc;
952 size_t updates_expected;
953 uint64_t max_index;
954 };
955
956 struct reftable_transaction_data {
957 struct write_transaction_table_arg *args;
958 size_t args_nr, args_alloc;
959 };
960
961 static void free_transaction_data(struct reftable_transaction_data *tx_data)
962 {
963 if (!tx_data)
964 return;
965 for (size_t i = 0; i < tx_data->args_nr; i++) {
966 reftable_addition_destroy(tx_data->args[i].addition);
967 free(tx_data->args[i].updates);
968 }
969 free(tx_data->args);
970 free(tx_data);
971 }
972
973 /*
974 * Prepare transaction update for the given reference update. This will cause
975 * us to lock the corresponding reftable stack for concurrent modification.
976 */
977 static int prepare_transaction_update(struct write_transaction_table_arg **out,
978 struct reftable_ref_store *refs,
979 struct reftable_transaction_data *tx_data,
980 struct ref_update *update,
981 struct strbuf *err)
982 {
983 struct write_transaction_table_arg *arg = NULL;
984 struct reftable_backend *be;
985 size_t i;
986 int ret;
987
988 /*
989 * This function gets called in a loop, and we don't want to repeatedly
990 * reload the stack for every single ref update. Instead, we manually
991 * reload further down in the case where we haven't yet prepared the
992 * specific `reftable_backend`.
993 */
994 ret = backend_for(&be, refs, update->refname, NULL, 0);
995 if (ret)
996 return ret;
997
998 /*
999 * Search for a preexisting stack update. If there is one then we add
1000 * the update to it, otherwise we set up a new stack update.
1001 */
1002 for (i = 0; !arg && i < tx_data->args_nr; i++)
1003 if (tx_data->args[i].be == be)
1004 arg = &tx_data->args[i];
1005
1006 if (!arg) {
1007 struct reftable_addition *addition;
1008
1009 ret = reftable_stack_reload(be->stack);
1010 if (ret)
1011 return ret;
1012
1013 ret = reftable_stack_new_addition(&addition, be->stack,
1014 REFTABLE_STACK_NEW_ADDITION_RELOAD);
1015 if (ret) {
1016 if (ret == REFTABLE_LOCK_ERROR)
1017 strbuf_addstr(err, "cannot lock references");
1018 return ret;
1019 }
1020
1021 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
1022 tx_data->args_alloc);
1023 arg = &tx_data->args[tx_data->args_nr++];
1024 arg->refs = refs;
1025 arg->be = be;
1026 arg->addition = addition;
1027 arg->updates = NULL;
1028 arg->updates_nr = 0;
1029 arg->updates_alloc = 0;
1030 arg->updates_expected = 0;
1031 arg->max_index = 0;
1032 }
1033
1034 arg->updates_expected++;
1035
1036 if (out)
1037 *out = arg;
1038
1039 return 0;
1040 }
1041
1042 /*
1043 * Queue a reference update for the correct stack. We potentially need to
1044 * handle multiple stack updates in a single transaction when it spans across
1045 * multiple worktrees.
1046 */
1047 static int queue_transaction_update(struct reftable_ref_store *refs,
1048 struct reftable_transaction_data *tx_data,
1049 struct ref_update *update,
1050 struct object_id *current_oid,
1051 struct strbuf *err)
1052 {
1053 struct write_transaction_table_arg *arg = NULL;
1054 int ret;
1055
1056 if (update->backend_data)
1057 BUG("reference update queued more than once");
1058
1059 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
1060 if (ret < 0)
1061 return ret;
1062
1063 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
1064 arg->updates_alloc);
1065 arg->updates[arg->updates_nr].update = update;
1066 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
1067 update->backend_data = &arg->updates[arg->updates_nr++];
1068
1069 return 0;
1070 }
1071
1072 static enum ref_transaction_error prepare_single_update(struct reftable_ref_store *refs,
1073 struct reftable_transaction_data *tx_data,
1074 struct ref_transaction *transaction,
1075 struct reftable_backend *be,
1076 struct ref_update *u,
1077 size_t update_idx,
1078 struct string_list *refnames_to_check,
1079 unsigned int head_type,
1080 struct strbuf *head_referent,
1081 struct strbuf *referent,
1082 struct strbuf *err)
1083 {
1084 enum ref_transaction_error ret = 0;
1085 struct object_id current_oid = {0};
1086 const char *rewritten_ref;
1087
1088 /*
1089 * There is no need to reload the respective backends here as
1090 * we have already reloaded them when preparing the transaction
1091 * update. And given that the stacks have been locked there
1092 * shouldn't have been any concurrent modifications of the
1093 * stack.
1094 */
1095 ret = backend_for(&be, refs, u->refname, &rewritten_ref, 0);
1096 if (ret)
1097 return REF_TRANSACTION_ERROR_GENERIC;
1098
1099 /* Verify that the new object ID is valid. */
1100 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
1101 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
1102 !(u->flags & REF_LOG_ONLY)) {
1103 struct object *o = parse_object(refs->base.repo, &u->new_oid);
1104 if (!o) {
1105 strbuf_addf(err,
1106 _("trying to write ref '%s' with nonexistent object %s"),
1107 u->refname, oid_to_hex(&u->new_oid));
1108 return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
1109 }
1110
1111 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
1112 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
1113 oid_to_hex(&u->new_oid), u->refname);
1114 return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE;
1115 }
1116 }
1117
1118 /*
1119 * When we update the reference that HEAD points to we enqueue
1120 * a second log-only update for HEAD so that its reflog is
1121 * updated accordingly.
1122 */
1123 if (head_type == REF_ISSYMREF &&
1124 !(u->flags & REF_LOG_ONLY) &&
1125 !(u->flags & REF_UPDATE_VIA_HEAD) &&
1126 !strcmp(rewritten_ref, head_referent->buf)) {
1127 /*
1128 * First make sure that HEAD is not already in the
1129 * transaction. This check is O(lg N) in the transaction
1130 * size, but it happens at most once per transaction.
1131 */
1132 if (string_list_has_string(&transaction->refnames, "HEAD")) {
1133 /* An entry already existed */
1134 strbuf_addf(err,
1135 _("multiple updates for 'HEAD' (including one "
1136 "via its referent '%s') are not allowed"),
1137 u->refname);
1138 return REF_TRANSACTION_ERROR_NAME_CONFLICT;
1139 }
1140
1141 ref_transaction_add_update(
1142 transaction, "HEAD",
1143 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
1144 &u->new_oid, &u->old_oid, NULL, NULL, NULL,
1145 u->msg);
1146 }
1147
1148 ret = reftable_backend_read_ref(be, rewritten_ref,
1149 &current_oid, referent, &u->type);
1150 if (ret < 0)
1151 return REF_TRANSACTION_ERROR_GENERIC;
1152 if (ret > 0 && !ref_update_expects_existing_old_ref(u)) {
1153 struct string_list_item *item;
1154 /*
1155 * The reference does not exist, and we either have no
1156 * old object ID or expect the reference to not exist.
1157 * We can thus skip below safety checks as well as the
1158 * symref splitting. But we do want to verify that
1159 * there is no conflicting reference here so that we
1160 * can output a proper error message instead of failing
1161 * at a later point.
1162 */
1163 item = string_list_append(refnames_to_check, u->refname);
1164 item->util = xmalloc(sizeof(update_idx));
1165 memcpy(item->util, &update_idx, sizeof(update_idx));
1166
1167 /*
1168 * There is no need to write the reference deletion
1169 * when the reference in question doesn't exist.
1170 */
1171 if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
1172 ret = queue_transaction_update(refs, tx_data, u,
1173 &current_oid, err);
1174 if (ret)
1175 return REF_TRANSACTION_ERROR_GENERIC;
1176 }
1177
1178 return 0;
1179 }
1180 if (ret > 0) {
1181 /* The reference does not exist, but we expected it to. */
1182 strbuf_addf(err, _("cannot lock ref '%s': "
1185 "unable to resolve reference '%s'"),
1186 ref_update_original_update_refname(u), u->refname);
1187 return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
1188 }
1189
1190 if (u->type & REF_ISSYMREF) {
1191 /*
1192 * The reftable stack is locked at this point already,
1193 * so it is safe to call `refs_resolve_ref_unsafe()`
1194 * here without causing races.
1195 */
1196 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
1197 &current_oid, NULL);
1198
1199 if (u->flags & REF_NO_DEREF) {
1200 if (u->flags & REF_HAVE_OLD && !resolved) {
1201 strbuf_addf(err, _("cannot lock ref '%s': "
1202 "error reading reference"), u->refname);
1203 return REF_TRANSACTION_ERROR_GENERIC;
1204 }
1205 } else {
1206 struct ref_update *new_update;
1207 int new_flags;
1208
1209 new_flags = u->flags;
1210 if (!strcmp(rewritten_ref, "HEAD"))
1211 new_flags |= REF_UPDATE_VIA_HEAD;
1212
1213 if (string_list_has_string(&transaction->refnames, referent->buf)) {
1214 strbuf_addf(err,
1215 _("multiple updates for '%s' (including one "
1216 "via symref '%s') are not allowed"),
1217 referent->buf, u->refname);
1218 return REF_TRANSACTION_ERROR_NAME_CONFLICT;
1219 }
1220
1221 /*
1222 * If we are updating a symref (eg. HEAD), we should also
1223 * update the branch that the symref points to.
1224 *
1225 * This is generic functionality, and would be better
1226 * done in refs.c, but the current implementation is
1227 * intertwined with the locking in files-backend.c.
1228 */
1229 new_update = ref_transaction_add_update(
1230 transaction, referent->buf, new_flags,
1231 u->new_target ? NULL : &u->new_oid,
1232 u->old_target ? NULL : &u->old_oid,
1233 u->new_target, u->old_target,
1234 u->committer_info, u->msg);
1235
1236 new_update->parent_update = u;
1237
1238 /*
1239 * Change the symbolic ref update to log only. Also, it
1240 * doesn't need to check its old OID value, as that will be
1241 * done when new_update is processed.
1242 */
1243 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
1244 u->flags &= ~REF_HAVE_OLD;
1245 }
1246 }
1247
1248 /*
1249 * Verify that the old object matches our expectations. Note
1250 * that the error messages here do not make a lot of sense in
1251 * the context of the reftable backend as we never lock
1252 * individual refs. But the error messages match what the files
1253 * backend returns, which keeps our tests happy.
1254 */
1255 if (u->old_target) {
1256 if (!(u->type & REF_ISSYMREF)) {
1257 strbuf_addf(err, _("cannot lock ref '%s': "
1258 "expected symref with target '%s': "
1259 "but is a regular ref"),
1260 ref_update_original_update_refname(u),
1261 u->old_target);
1262 return REF_TRANSACTION_ERROR_EXPECTED_SYMREF;
1263 }
1264
1265 ret = ref_update_check_old_target(referent->buf, u, err);
1266 if (ret)
1267 return ret;
1268 } else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
1269 if (is_null_oid(&u->old_oid)) {
1270 strbuf_addf(err, _("cannot lock ref '%s': "
1271 "reference already exists"),
1272 ref_update_original_update_refname(u));
1273 return REF_TRANSACTION_ERROR_CREATE_EXISTS;
1274 } else if (is_null_oid(&current_oid)) {
1275 strbuf_addf(err, _("cannot lock ref '%s': "
1276 "reference is missing but expected %s"),
1277 ref_update_original_update_refname(u),
1278 oid_to_hex(&u->old_oid));
1279 return REF_TRANSACTION_ERROR_NONEXISTENT_REF;
1280 } else {
1281 strbuf_addf(err, _("cannot lock ref '%s': "
1282 "is at %s but expected %s"),
1283 ref_update_original_update_refname(u),
1284 oid_to_hex(&current_oid),
1285 oid_to_hex(&u->old_oid));
1286 return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE;
1287 }
1288 }
1289
1290 /*
1291 * If all of the following conditions are true:
1292 *
1293 * - We're not about to write a symref.
1294 * - We're not about to write a log-only entry.
1295 * - Old and new object ID are the same.
1296 *
1297 * Then we're essentially doing a no-op update that can be
1298 * skipped. This is not only for the sake of efficiency, but
1299 * also skips writing unneeded reflog entries.
1300 */
1301 if ((u->type & REF_ISSYMREF) ||
1302 (u->flags & REF_LOG_ONLY) ||
1303 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid)))
1304 if (queue_transaction_update(refs, tx_data, u, &current_oid, err))
1305 return REF_TRANSACTION_ERROR_GENERIC;
1306
1307 return 0;
1308 }
1309
1310 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
1311 struct ref_transaction *transaction,
1312 struct strbuf *err)
1313 {
1314 struct reftable_ref_store *refs =
1315 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
1316 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
1317 struct string_list refnames_to_check = STRING_LIST_INIT_NODUP;
1318 struct reftable_transaction_data *tx_data = NULL;
1319 struct reftable_backend *be;
1320 struct object_id head_oid;
1321 unsigned int head_type = 0;
1322 size_t i;
1323 int ret;
1324
1325 ret = refs->err;
1326 if (ret < 0)
1327 goto done;
1328
1329 tx_data = xcalloc(1, sizeof(*tx_data));
1330
1331 /*
1332 * Preprocess all updates. First, we check that there are no duplicate
1333 * reference updates in this transaction. Second, we lock all stacks
1334 * that will be modified during the transaction.
1335 */
1336 for (i = 0; i < transaction->nr; i++) {
1337 ret = prepare_transaction_update(NULL, refs, tx_data,
1338 transaction->updates[i], err);
1339 if (ret)
1340 goto done;
1341 }
1342
1343 /*
1344 * Now that we have counted updates per stack we can preallocate their
1345 * arrays. This avoids having to reallocate many times.
1346 */
1347 for (i = 0; i < tx_data->args_nr; i++) {
1348 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
1349 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
1350 }
1351
1352 /*
1353 * TODO: it's dubious whether we should reload the stack that "HEAD"
1354 * belongs to or not. In theory, it may happen that we only modify
1355 * stacks which are _not_ part of the "HEAD" stack. In that case we
1356 * wouldn't have prepared any transaction for its stack and would not
1357 * have reloaded it, which may mean that it is stale.
1358 *
1359 * On the other hand, reloading that stack without locking it feels
1360 * wrong, too, as the value of "HEAD" could be modified concurrently at
1361 * any point in time.
1362 */
1363 ret = backend_for(&be, refs, "HEAD", NULL, 0);
1364 if (ret)
1365 goto done;
1366
1367 ret = reftable_backend_read_ref(be, "HEAD", &head_oid,
1368 &head_referent, &head_type);
1369 if (ret < 0)
1370 goto done;
1371 ret = 0;
1372
1373 for (i = 0; i < transaction->nr; i++) {
1374 ret = prepare_single_update(refs, tx_data, transaction, be,
1375 transaction->updates[i], i,
1376 &refnames_to_check, head_type,
1377 &head_referent, &referent, err);
1378 if (ret) {
1379 if (ref_transaction_maybe_set_rejected(transaction, i, ret)) {
1380 strbuf_reset(err);
1381 ret = 0;
1382
1383 continue;
1384 }
1385 goto done;
1386 }
1387 }
1388
1389 ret = refs_verify_refnames_available(ref_store, &refnames_to_check,
1390 &transaction->refnames, NULL,
1391 transaction,
1392 transaction->flags & REF_TRANSACTION_FLAG_INITIAL,
1393 err);
1394 if (ret < 0)
1395 goto done;
1396
1397 transaction->backend_data = tx_data;
1398 transaction->state = REF_TRANSACTION_PREPARED;
1399
1400 done:
1401 if (ret < 0) {
1402 free_transaction_data(tx_data);
1403 transaction->state = REF_TRANSACTION_CLOSED;
1404 if (!err->len)
1405 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1406 reftable_error_str(ret));
1407 }
1408 strbuf_release(&referent);
1409 strbuf_release(&head_referent);
1410 string_list_clear(&refnames_to_check, 1);
1411
1412 return ret;
1413 }
1414
1415 static int reftable_be_transaction_abort(struct ref_store *ref_store UNUSED,
1416 struct ref_transaction *transaction,
1417 struct strbuf *err UNUSED)
1418 {
1419 struct reftable_transaction_data *tx_data = transaction->backend_data;
1420 free_transaction_data(tx_data);
1421 transaction->state = REF_TRANSACTION_CLOSED;
1422 return 0;
1423 }
1424
1425 static int transaction_update_cmp(const void *a, const void *b)
1426 {
1427 struct reftable_transaction_update *update_a = (struct reftable_transaction_update *)a;
1428 struct reftable_transaction_update *update_b = (struct reftable_transaction_update *)b;
1429
1430 /*
1431 * If there is an index set, it should take preference (default is 0).
1432 * This ensures that updates with indexes are sorted amongst themselves.
1433 */
1434 if (update_a->update->index || update_b->update->index)
1435 return update_a->update->index - update_b->update->index;
1436
1437 return strcmp(update_a->update->refname, update_b->update->refname);
1438 }
1439
1440 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1441 {
1442 struct write_transaction_table_arg *arg = cb_data;
1443 uint64_t ts = reftable_stack_next_update_index(arg->be->stack);
1444 struct reftable_log_record *logs = NULL;
1445 struct ident_split committer_ident = {0};
1446 size_t logs_nr = 0, logs_alloc = 0, i;
1447 const char *committer_info;
1448 int ret = 0;
1449
1450 committer_info = git_committer_info(0);
1451 if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1452 BUG("failed splitting committer info");
1453
1454 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1455
1456 /*
1457 * During reflog migration, we add indexes for a single reflog with
1458 * multiple entries. Each entry will contain a different update_index,
1459 * so set the limits accordingly.
1460 */
1461 ret = reftable_writer_set_limits(writer, ts, ts + arg->max_index);
1462 if (ret < 0)
1463 goto done;
1464
1465 for (i = 0; i < arg->updates_nr; i++) {
1466 struct reftable_transaction_update *tx_update = &arg->updates[i];
1467 struct ref_update *u = tx_update->update;
1468
1469 if (u->rejection_err)
1470 continue;
1471
1472 /*
1473 * Write a reflog entry when updating a ref to point to
1474 * something new in either of the following cases:
1475 *
1476 * - The reference is about to be deleted. We always want to
1477 * delete the reflog in that case.
1478 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1479 * the reflog entry.
1480 * - `core.logAllRefUpdates` tells us to create the reflog for
1481 * the given ref.
1482 */
1483 if ((u->flags & REF_HAVE_NEW) &&
1484 !(u->type & REF_ISSYMREF) &&
1485 ref_update_has_null_new_value(u)) {
1486 struct reftable_log_record log = {0};
1487 struct reftable_iterator it = {0};
1488
1489 ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
1490 if (ret < 0)
1491 goto done;
1492
1493 /*
1494 * When deleting refs we also delete all reflog entries
1495 * with them. While it is not strictly required to
1496 * delete reflogs together with their refs, this
1497 * matches the behaviour of the files backend.
1498 *
1499 * Unfortunately, we have no better way than to delete
1500 * all reflog entries one by one.
1501 */
1502 ret = reftable_iterator_seek_log(&it, u->refname);
1503 while (ret == 0) {
1504 struct reftable_log_record *tombstone;
1505
1506 ret = reftable_iterator_next_log(&it, &log);
1507 if (ret < 0)
1508 break;
1509 if (ret > 0 || strcmp(log.refname, u->refname)) {
1510 ret = 0;
1511 break;
1512 }
1513
1514 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1515 tombstone = &logs[logs_nr++];
1516 tombstone->refname = xstrdup(u->refname);
1517 tombstone->value_type = REFTABLE_LOG_DELETION;
1518 tombstone->update_index = log.update_index;
1519 }
1520
1521 reftable_log_record_release(&log);
1522 reftable_iterator_destroy(&it);
1523
1524 if (ret)
1525 goto done;
1526 } else if (!(u->flags & REF_SKIP_CREATE_REFLOG) &&
1527 (u->flags & REF_HAVE_NEW) &&
1528 (u->flags & REF_FORCE_CREATE_REFLOG ||
1529 should_write_log(arg->refs, u->refname))) {
1530 struct reftable_log_record *log;
1531 int create_reflog = 1;
1532
1533 if (u->new_target) {
1534 if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
1535 RESOLVE_REF_READING, &u->new_oid, NULL)) {
1536 /*
1537 * TODO: currently we skip creating reflogs for dangling
1538 * symref updates. It would be nice to capture this as
1539 * zero oid updates however.
1540 */
1541 create_reflog = 0;
1542 }
1543 }
1544
1545 if (create_reflog) {
1546 struct ident_split c;
1547
1548 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1549 log = &logs[logs_nr++];
1550 memset(log, 0, sizeof(*log));
1551
1552 if (u->committer_info) {
1553 if (split_ident_line(&c, u->committer_info,
1554 strlen(u->committer_info)))
1555 BUG("failed splitting committer info");
1556 } else {
1557 c = committer_ident;
1558 }
1559
1560 fill_reftable_log_record(log, &c);
1561
1562 /*
1563 * Updates are sorted by the writer. So updates for the same
1564 * refname need to contain different update indices.
1565 */
1566 log->update_index = ts + u->index;
1567
1568 log->refname = xstrdup(u->refname);
1569 memcpy(log->value.update.new_hash,
1570 u->new_oid.hash, GIT_MAX_RAWSZ);
1571 memcpy(log->value.update.old_hash,
1572 tx_update->current_oid.hash, GIT_MAX_RAWSZ);
1573 log->value.update.message =
1574 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1575 }
1576 }
1577
1578 if (u->flags & REF_LOG_ONLY)
1579 continue;
1580
1581 if (u->new_target) {
1582 struct reftable_ref_record ref = {
1583 .refname = (char *)u->refname,
1584 .value_type = REFTABLE_REF_SYMREF,
1585 .value.symref = (char *)u->new_target,
1586 .update_index = ts,
1587 };
1588
1589 ret = reftable_writer_add_ref(writer, &ref);
1590 if (ret < 0)
1591 goto done;
1592 } else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
1593 struct reftable_ref_record ref = {
1594 .refname = (char *)u->refname,
1595 .update_index = ts,
1596 .value_type = REFTABLE_REF_DELETION,
1597 };
1598
1599 ret = reftable_writer_add_ref(writer, &ref);
1600 if (ret < 0)
1601 goto done;
1602 } else if (u->flags & REF_HAVE_NEW) {
1603 struct reftable_ref_record ref = {0};
1604 struct object_id peeled;
1605 int peel_error;
1606
1607 ref.refname = (char *)u->refname;
1608 ref.update_index = ts;
1609
1610 peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled);
1611 if (!peel_error) {
1612 ref.value_type = REFTABLE_REF_VAL2;
1613 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1614 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1615 } else if (!is_null_oid(&u->new_oid)) {
1616 ref.value_type = REFTABLE_REF_VAL1;
1617 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1618 }
1619
1620 ret = reftable_writer_add_ref(writer, &ref);
1621 if (ret < 0)
1622 goto done;
1623 }
1624 }
1625
1626 /*
1627 * Logs are written at the end so that we do not have intermixed ref
1628 * and log blocks.
1629 */
1630 if (logs) {
1631 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1632 if (ret < 0)
1633 goto done;
1634 }
1635
1636 done:
1637 assert(ret != REFTABLE_API_ERROR);
1638 for (i = 0; i < logs_nr; i++)
1639 reftable_log_record_release(&logs[i]);
1640 free(logs);
1641 return ret;
1642 }
1643
1644 static int reftable_be_transaction_finish(struct ref_store *ref_store UNUSED,
1645 struct ref_transaction *transaction,
1646 struct strbuf *err)
1647 {
1648 struct reftable_transaction_data *tx_data = transaction->backend_data;
1649 int ret = 0;
1650
1651 for (size_t i = 0; i < tx_data->args_nr; i++) {
1652 tx_data->args[i].max_index = transaction->max_index;
1653
1654 ret = reftable_addition_add(tx_data->args[i].addition,
1655 write_transaction_table, &tx_data->args[i]);
1656 if (ret < 0)
1657 goto done;
1658
1659 ret = reftable_addition_commit(tx_data->args[i].addition);
1660 if (ret < 0)
1661 goto done;
1662 }
1663
1664 done:
1665 assert(ret != REFTABLE_API_ERROR);
1666 free_transaction_data(tx_data);
1667 transaction->state = REF_TRANSACTION_CLOSED;
1668
1669 if (ret) {
1670 strbuf_addf(err, _("reftable: transaction failure: %s"),
1671 reftable_error_str(ret));
1672 return -1;
1673 }
1674 return ret;
1675 }
1676
1677 static int reftable_be_pack_refs(struct ref_store *ref_store,
1678 struct pack_refs_opts *opts)
1679 {
1680 struct reftable_ref_store *refs =
1681 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1682 struct reftable_stack *stack;
1683 int ret;
1684
1685 if (refs->err)
1686 return refs->err;
1687
1688 stack = refs->worktree_backend.stack;
1689 if (!stack)
1690 stack = refs->main_backend.stack;
1691
1692 if (opts->flags & PACK_REFS_AUTO)
1693 ret = reftable_stack_auto_compact(stack);
1694 else
1695 ret = reftable_stack_compact_all(stack, NULL);
1696 if (ret < 0) {
1697 ret = error(_("unable to compact stack: %s"),
1698 reftable_error_str(ret));
1699 goto out;
1700 }
1701
1702 ret = reftable_stack_clean(stack);
1703 if (ret)
1704 goto out;
1705
1706 out:
1707 return ret;
1708 }
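/*
 * Note that by default this performs a full compaction of the stack, while
 * PACK_REFS_AUTO (e.g. set by "git pack-refs --auto") only triggers the
 * heuristic-based auto-compaction.
 */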
1709
1710 struct write_create_symref_arg {
1711 struct reftable_ref_store *refs;
1712 struct reftable_stack *stack;
1713 struct strbuf *err;
1714 const char *refname;
1715 const char *target;
1716 const char *logmsg;
1717 };
1718
1719 struct write_copy_arg {
1720 struct reftable_ref_store *refs;
1721 struct reftable_backend *be;
1722 const char *oldname;
1723 const char *newname;
1724 const char *logmsg;
1725 int delete_old;
1726 };
1727
1728 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1729 {
1730 struct write_copy_arg *arg = cb_data;
1731 uint64_t deletion_ts, creation_ts;
1732 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1733 struct reftable_log_record old_log = {0}, *logs = NULL;
1734 struct reftable_iterator it = {0};
1735 struct string_list skip = STRING_LIST_INIT_NODUP;
1736 struct ident_split committer_ident = {0};
1737 struct strbuf errbuf = STRBUF_INIT;
1738 size_t logs_nr = 0, logs_alloc = 0, i;
1739 const char *committer_info;
1740 int ret;
1741
1742 committer_info = git_committer_info(0);
1743 if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1744 BUG("failed splitting committer info");
1745
1746 if (reftable_stack_read_ref(arg->be->stack, arg->oldname, &old_ref)) {
1747 ret = error(_("refname %s not found"), arg->oldname);
1748 goto done;
1749 }
1750 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1751 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1752 arg->oldname);
1753 goto done;
1754 }
1755
1756 /*
1757 * There's nothing to do in case the old and new name are the same, so
1758 * we exit early in that case.
1759 */
1760 if (!strcmp(arg->oldname, arg->newname)) {
1761 ret = 0;
1762 goto done;
1763 }
1764
1765 /*
1766 * Verify that the new refname is available.
1767 */
1768 if (arg->delete_old)
1769 string_list_insert(&skip, arg->oldname);
1770 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1771 NULL, &skip, 0, &errbuf);
1772 if (ret < 0) {
1773 error("%s", errbuf.buf);
1774 goto done;
1775 }
1776
1777 /*
1778 * When deleting the old reference we have to use two update indices:
1779 * once to delete the old ref and its reflog, and once to create the
1780 * new ref and its reflog. They need to be staged with two separate
1781 * indices because the new reflog needs to encode both the deletion of
1782 * the old branch and the creation of the new branch, and we cannot do
1783 * two changes to a reflog in a single update.
1784 */
1785 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->be->stack);
1786 if (arg->delete_old)
1787 creation_ts++;
1788 ret = reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1789 if (ret < 0)
1790 goto done;
1791
1792 /*
1793 * Add the new reference. If this is a rename then we also delete the
1794 * old reference.
1795 */
1796 refs[0] = old_ref;
1797 refs[0].refname = xstrdup(arg->newname);
1798 refs[0].update_index = creation_ts;
1799 if (arg->delete_old) {
1800 refs[1].refname = xstrdup(arg->oldname);
1801 refs[1].value_type = REFTABLE_REF_DELETION;
1802 refs[1].update_index = deletion_ts;
1803 }
1804 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1805 if (ret < 0)
1806 goto done;
1807
1808 /*
1809 * When deleting the old branch we need to create a reflog entry on the
1810 * new branch name that indicates that the old branch has been deleted
1811 * and then recreated. This is a tad weird, but matches what the files
1812 * backend does.
1813 */
1814 if (arg->delete_old) {
1815 struct strbuf head_referent = STRBUF_INIT;
1816 struct object_id head_oid;
1817 int append_head_reflog;
1818 unsigned head_type = 0;
1819
1820 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1821 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1822 fill_reftable_log_record(&logs[logs_nr], &committer_ident);
1823 logs[logs_nr].refname = xstrdup(arg->newname);
1824 logs[logs_nr].update_index = deletion_ts;
1825 logs[logs_nr].value.update.message =
1826 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1827 memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1828 logs_nr++;
1829
1830 ret = reftable_backend_read_ref(arg->be, "HEAD", &head_oid,
1831 &head_referent, &head_type);
1832 if (ret < 0)
1833 goto done;
1834 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1835 strbuf_release(&head_referent);
1836
1837 /*
1838 * The files backend uses `refs_delete_ref()` to delete the old
1839 * branch name, which will append a reflog entry for HEAD in
1840 * case it points to the old branch.
1841 */
1842 if (append_head_reflog) {
1843 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1844 logs[logs_nr] = logs[logs_nr - 1];
1845 logs[logs_nr].refname = xstrdup("HEAD");
1846 logs[logs_nr].value.update.name =
1847 xstrdup(logs[logs_nr].value.update.name);
1848 logs[logs_nr].value.update.email =
1849 xstrdup(logs[logs_nr].value.update.email);
1850 logs[logs_nr].value.update.message =
1851 xstrdup(logs[logs_nr].value.update.message);
1852 logs_nr++;
1853 }
1854 }
1855
1856 /*
1857 * Create the reflog entry for the newly created branch.
1858 */
1859 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1860 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1861 fill_reftable_log_record(&logs[logs_nr], &committer_ident);
1862 logs[logs_nr].refname = xstrdup(arg->newname);
1863 logs[logs_nr].update_index = creation_ts;
1864 logs[logs_nr].value.update.message =
1865 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1866 memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1867 logs_nr++;
1868
1869 /*
1870 * In addition to writing the reflog entry for the new branch, we also
1871 * copy over all log entries from the old reflog. Last but not least,
1872 * when renaming we also have to delete all the old reflog entries.
1873 */
1874 ret = reftable_stack_init_log_iterator(arg->be->stack, &it);
1875 if (ret < 0)
1876 goto done;
1877
1878 ret = reftable_iterator_seek_log(&it, arg->oldname);
1879 if (ret < 0)
1880 goto done;
1881
1882 while (1) {
1883 ret = reftable_iterator_next_log(&it, &old_log);
1884 if (ret < 0)
1885 goto done;
1886 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1887 ret = 0;
1888 break;
1889 }
1890
1891 free(old_log.refname);
1892
1893 /*
1894 * Copy over the old reflog entry with the new refname.
1895 */
1896 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1897 logs[logs_nr] = old_log;
1898 logs[logs_nr].refname = xstrdup(arg->newname);
1899 logs_nr++;
1900
1901 /*
1902 * Delete the old reflog entry in case we are renaming.
1903 */
1904 if (arg->delete_old) {
1905 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1906 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1907 logs[logs_nr].refname = xstrdup(arg->oldname);
1908 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1909 logs[logs_nr].update_index = old_log.update_index;
1910 logs_nr++;
1911 }
1912
1913 /*
1914 * Transfer ownership of the log record we're iterating over to
1915 * the array of log records. Otherwise, the pointers would get
1916 * freed or reallocated by the iterator.
1917 */
1918 memset(&old_log, 0, sizeof(old_log));
1919 }
1920
1921 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1922 if (ret < 0)
1923 goto done;
1924
1925 done:
1926 assert(ret != REFTABLE_API_ERROR);
1927 reftable_iterator_destroy(&it);
1928 string_list_clear(&skip, 0);
1929 strbuf_release(&errbuf);
1930 for (i = 0; i < logs_nr; i++)
1931 reftable_log_record_release(&logs[i]);
1932 free(logs);
1933 for (i = 0; i < ARRAY_SIZE(refs); i++)
1934 reftable_ref_record_release(&refs[i]);
1935 reftable_ref_record_release(&old_ref);
1936 reftable_log_record_release(&old_log);
1937 return ret;
1938 }
1939
1940 static int reftable_be_rename_ref(struct ref_store *ref_store,
1941 const char *oldrefname,
1942 const char *newrefname,
1943 const char *logmsg)
1944 {
1945 struct reftable_ref_store *refs =
1946 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1947 struct write_copy_arg arg = {
1948 .refs = refs,
1949 .oldname = oldrefname,
1950 .newname = newrefname,
1951 .logmsg = logmsg,
1952 .delete_old = 1,
1953 };
1954 int ret;
1955
1956 ret = refs->err;
1957 if (ret < 0)
1958 goto done;
1959
1960 ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
1961 if (ret)
1962 goto done;
1963 ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
1964
1965 done:
1966 assert(ret != REFTABLE_API_ERROR);
1967 return ret;
1968 }
1969
1970 static int reftable_be_copy_ref(struct ref_store *ref_store,
1971 const char *oldrefname,
1972 const char *newrefname,
1973 const char *logmsg)
1974 {
1975 struct reftable_ref_store *refs =
1976 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1977 struct write_copy_arg arg = {
1978 .refs = refs,
1979 .oldname = oldrefname,
1980 .newname = newrefname,
1981 .logmsg = logmsg,
1982 };
1983 int ret;
1984
1985 ret = refs->err;
1986 if (ret < 0)
1987 goto done;
1988
1989 ret = backend_for(&arg.be, refs, newrefname, &newrefname, 1);
1990 if (ret)
1991 goto done;
1992 ret = reftable_stack_add(arg.be->stack, &write_copy_table, &arg);
1993
1994 done:
1995 assert(ret != REFTABLE_API_ERROR);
1996 return ret;
1997 }
1998
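/*
 * Iterator that yields each refname which has at least one reflog record.
 * Consecutive log records for the same refname are collapsed in advance()
 * so that every name is produced only once.
 */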
1999 struct reftable_reflog_iterator {
2000 struct ref_iterator base;
2001 struct reftable_ref_store *refs;
2002 struct reftable_iterator iter;
2003 struct reftable_log_record log;
2004 struct strbuf last_name;
2005 int err;
2006 };
2007
2008 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
2009 {
2010 struct reftable_reflog_iterator *iter =
2011 (struct reftable_reflog_iterator *)ref_iterator;
2012
2013 while (!iter->err) {
2014 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
2015 if (iter->err)
2016 break;
2017
2018 /*
2019 * We want the refnames that we have reflogs for, so we skip if
2020 * we've already produced this name. This could be faster by
2021 * seeking directly to reflog@update_index==0.
2022 */
2023 if (!strcmp(iter->log.refname, iter->last_name.buf))
2024 continue;
2025
2026 if (check_refname_format(iter->log.refname,
2027 REFNAME_ALLOW_ONELEVEL))
2028 continue;
2029
2030 strbuf_reset(&iter->last_name);
2031 strbuf_addstr(&iter->last_name, iter->log.refname);
2032 iter->base.refname = iter->log.refname;
2033
2034 break;
2035 }
2036
2037 if (iter->err > 0)
2038 return ITER_DONE;
2039 if (iter->err < 0)
2040 return ITER_ERROR;
2041 return ITER_OK;
2042 }
2043
2044 static int reftable_reflog_iterator_seek(struct ref_iterator *ref_iterator UNUSED,
2045 const char *prefix UNUSED)
2046 {
2047 BUG("reftable reflog iterator cannot be seeked");
2048 return -1;
2049 }
2050
2051 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
2052 struct object_id *peeled UNUSED)
2053 {
2054 BUG("reftable reflog iterator cannot be peeled");
2055 return -1;
2056 }
2057
2058 static void reftable_reflog_iterator_release(struct ref_iterator *ref_iterator)
2059 {
2060 struct reftable_reflog_iterator *iter =
2061 (struct reftable_reflog_iterator *)ref_iterator;
2062 reftable_log_record_release(&iter->log);
2063 reftable_iterator_destroy(&iter->iter);
2064 strbuf_release(&iter->last_name);
2065 }
2066
2067 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
2068 .advance = reftable_reflog_iterator_advance,
2069 .seek = reftable_reflog_iterator_seek,
2070 .peel = reftable_reflog_iterator_peel,
2071 .release = reftable_reflog_iterator_release,
2072 };
2073
2074 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
2075 struct reftable_stack *stack)
2076 {
2077 struct reftable_reflog_iterator *iter;
2078 int ret;
2079
2080 iter = xcalloc(1, sizeof(*iter));
2081 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
2082 strbuf_init(&iter->last_name, 0);
2083 iter->refs = refs;
2084
2085 ret = refs->err;
2086 if (ret)
2087 goto done;
2088
2089 ret = reftable_stack_reload(stack);
2090 if (ret < 0)
2091 goto done;
2092
2093 ret = reftable_stack_init_log_iterator(stack, &iter->iter);
2094 if (ret < 0)
2095 goto done;
2096
2097 ret = reftable_iterator_seek_log(&iter->iter, "");
2098 if (ret < 0)
2099 goto done;
2100
2101 done:
2102 iter->err = ret;
2103 return iter;
2104 }
2105
2106 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
2107 {
2108 struct reftable_ref_store *refs =
2109 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
2110 struct reftable_reflog_iterator *main_iter, *worktree_iter;
2111
2112 main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
2113 if (!refs->worktree_backend.stack)
2114 return &main_iter->base;
2115
2116 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);
2117
2118 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
2119 ref_iterator_select, NULL);
2120 }
2121
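/*
 * Translate a single reftable log record into the arguments expected by
 * the generic each_reflog_ent_fn callback. Reflog existence markers (both
 * object IDs null) are filtered out here so that callers never see them.
 */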
2122 static int yield_log_record(struct reftable_ref_store *refs,
2123 struct reftable_log_record *log,
2124 each_reflog_ent_fn fn,
2125 void *cb_data)
2126 {
2127 struct object_id old_oid, new_oid;
2128 const char *full_committer;
2129
2130 oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
2131 oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);
2132
2133 /*
2134 * When both the old object ID and the new object ID are null
2135 * then this is the reflog existence marker, which must never be
2136 * exposed to the caller.
2137 */
2138 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
2139 return 0;
2140
2141 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
2142 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
2143 return fn(&old_oid, &new_oid, full_committer,
2144 log->value.update.time, log->value.update.tz_offset,
2145 log->value.update.message, cb_data);
2146 }
2147
2148 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
2149 const char *refname,
2150 each_reflog_ent_fn fn,
2151 void *cb_data)
2152 {
2153 struct reftable_ref_store *refs =
2154 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
2155 struct reftable_log_record log = {0};
2156 struct reftable_iterator it = {0};
2157 struct reftable_backend *be;
2158 int ret;
2159
2160 if (refs->err < 0)
2161 return refs->err;
2162
2163 /*
2164 * TODO: we should adapt this callsite to reload the stack. There is no
2165 * obvious reason why we shouldn't.
2166 */
2167 ret = backend_for(&be, refs, refname, &refname, 0);
2168 if (ret)
2169 goto done;
2170
2171 ret = reftable_stack_init_log_iterator(be->stack, &it);
2172 if (ret < 0)
2173 goto done;
2174
2175 ret = reftable_iterator_seek_log(&it, refname);
2176 while (!ret) {
2177 ret = reftable_iterator_next_log(&it, &log);
2178 if (ret < 0)
2179 break;
2180 if (ret > 0 || strcmp(log.refname, refname)) {
2181 ret = 0;
2182 break;
2183 }
2184
2185 ret = yield_log_record(refs, &log, fn, cb_data);
2186 if (ret)
2187 break;
2188 }
2189
2190 done:
2191 reftable_log_record_release(&log);
2192 reftable_iterator_destroy(&it);
2193 return ret;
2194 }
2195
2196 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
2197 const char *refname,
2198 each_reflog_ent_fn fn,
2199 void *cb_data)
2200 {
2201 struct reftable_ref_store *refs =
2202 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
2203 struct reftable_log_record *logs = NULL;
2204 struct reftable_iterator it = {0};
2205 struct reftable_backend *be;
2206 size_t logs_alloc = 0, logs_nr = 0, i;
2207 int ret;
2208
2209 if (refs->err < 0)
2210 return refs->err;
2211
2212 /*
2213 * TODO: we should adapt this callsite to reload the stack. There is no
2214 * obvious reason why we shouldn't.
2215 */
2216 ret = backend_for(&be, refs, refname, &refname, 0);
2217 if (ret)
2218 goto done;
2219
2220 ret = reftable_stack_init_log_iterator(be->stack, &it);
2221 if (ret < 0)
2222 goto done;
2223
2224 ret = reftable_iterator_seek_log(&it, refname);
2225 while (!ret) {
2226 struct reftable_log_record log = {0};
2227
2228 ret = reftable_iterator_next_log(&it, &log);
2229 if (ret < 0)
2230 goto done;
2231 if (ret > 0 || strcmp(log.refname, refname)) {
2232 reftable_log_record_release(&log);
2233 ret = 0;
2234 break;
2235 }
2236
2237 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2238 logs[logs_nr++] = log;
2239 }
2240
2241 for (i = logs_nr; i--;) {
2242 ret = yield_log_record(refs, &logs[i], fn, cb_data);
2243 if (ret)
2244 goto done;
2245 }
2246
2247 done:
2248 reftable_iterator_destroy(&it);
2249 for (i = 0; i < logs_nr; i++)
2250 reftable_log_record_release(&logs[i]);
2251 free(logs);
2252 return ret;
2253 }
2254
2255 static int reftable_be_reflog_exists(struct ref_store *ref_store,
2256 const char *refname)
2257 {
2258 struct reftable_ref_store *refs =
2259 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
2260 struct reftable_log_record log = {0};
2261 struct reftable_iterator it = {0};
2262 struct reftable_backend *be;
2263 int ret;
2264
2265 ret = refs->err;
2266 if (ret < 0)
2267 goto done;
2268
2269 ret = backend_for(&be, refs, refname, &refname, 1);
2270 if (ret < 0)
2271 goto done;
2272
2273 ret = reftable_stack_init_log_iterator(be->stack, &it);
2274 if (ret < 0)
2275 goto done;
2276
2277 ret = reftable_iterator_seek_log(&it, refname);
2278 if (ret < 0)
2279 goto done;
2280
2281 /*
2282 * Check whether we get at least one log record for the given ref name.
2283 * If so, the reflog exists, otherwise it doesn't.
2284 */
2285 ret = reftable_iterator_next_log(&it, &log);
2286 if (ret < 0)
2287 goto done;
2288 if (ret > 0) {
2289 ret = 0;
2290 goto done;
2291 }
2292
2293 ret = strcmp(log.refname, refname) == 0;
2294
2295 done:
2296 reftable_iterator_destroy(&it);
2297 reftable_log_record_release(&log);
2298 if (ret < 0)
2299 ret = 0;
2300 return ret;
2301 }
2302
2303 struct write_reflog_existence_arg {
2304 struct reftable_ref_store *refs;
2305 const char *refname;
2306 struct reftable_stack *stack;
2307 };
2308
2309 static int write_reflog_existence_table(struct reftable_writer *writer,
2310 void *cb_data)
2311 {
2312 struct write_reflog_existence_arg *arg = cb_data;
2313 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2314 struct reftable_log_record log = {0};
2315 int ret;
2316
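/*
 * A return value greater than zero means that no log record exists for
 * this refname yet; only in that case do we write the existence marker.
 * If the reflog already has at least one record, or reading fails, we
 * bail out without staging anything.
 */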
2317 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
2318 if (ret <= 0)
2319 goto done;
2320
2321 ret = reftable_writer_set_limits(writer, ts, ts);
2322 if (ret < 0)
2323 goto done;
2324
2325 /*
2326 * The existence entry has both old and new object ID set to the
2327 * null object ID. Our iterators are aware of this and will not present
2328 * such entries to their callers.
2329 */
2330 log.refname = xstrdup(arg->refname);
2331 log.update_index = ts;
2332 log.value_type = REFTABLE_LOG_UPDATE;
2333 ret = reftable_writer_add_log(writer, &log);
2334
2335 done:
2336 assert(ret != REFTABLE_API_ERROR);
2337 reftable_log_record_release(&log);
2338 return ret;
2339 }
2340
2341 static int reftable_be_create_reflog(struct ref_store *ref_store,
2342 const char *refname,
2343 struct strbuf *errmsg UNUSED)
2344 {
2345 struct reftable_ref_store *refs =
2346 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
2347 struct reftable_backend *be;
2348 struct write_reflog_existence_arg arg = {
2349 .refs = refs,
2350 .refname = refname,
2351 };
2352 int ret;
2353
2354 ret = refs->err;
2355 if (ret < 0)
2356 goto done;
2357
2358 ret = backend_for(&be, refs, refname, &refname, 1);
2359 if (ret)
2360 goto done;
2361 arg.stack = be->stack;
2362
2363 ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);
2364
2365 done:
2366 return ret;
2367 }
2368
2369 struct write_reflog_delete_arg {
2370 struct reftable_stack *stack;
2371 const char *refname;
2372 };
2373
2374 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
2375 {
2376 struct write_reflog_delete_arg *arg = cb_data;
2377 struct reftable_log_record log = {0}, tombstone = {0};
2378 struct reftable_iterator it = {0};
2379 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2380 int ret;
2381
2382 ret = reftable_writer_set_limits(writer, ts, ts);
2383 if (ret < 0)
2384 goto out;
2385
2386 ret = reftable_stack_init_log_iterator(arg->stack, &it);
2387 if (ret < 0)
2388 goto out;
2389
2390 /*
2391 * In order to delete a reflog we need to delete all of its entries one
2392 * by one. This is inefficient, but the reftable format does not have a
2393 * better deletion marker right now.
2394 */
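/*
 * For example, a reflog with entries at update indices 1, 4 and 7 gets
 * removed by staging three REFTABLE_LOG_DELETION records that reuse those
 * same update indices, shadowing the original entries once the new table
 * has been added to the stack.
 */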
2395 ret = reftable_iterator_seek_log(&it, arg->refname);
2396 while (ret == 0) {
2397 ret = reftable_iterator_next_log(&it, &log);
2398 if (ret < 0)
2399 break;
2400 if (ret > 0 || strcmp(log.refname, arg->refname)) {
2401 ret = 0;
2402 break;
2403 }
2404
2405 tombstone.refname = (char *)arg->refname;
2406 tombstone.value_type = REFTABLE_LOG_DELETION;
2407 tombstone.update_index = log.update_index;
2408
2409 ret = reftable_writer_add_log(writer, &tombstone);
2410 }
2411
2412 out:
2413 reftable_log_record_release(&log);
2414 reftable_iterator_destroy(&it);
2415 return ret;
2416 }
2417
2418 static int reftable_be_delete_reflog(struct ref_store *ref_store,
2419 const char *refname)
2420 {
2421 struct reftable_ref_store *refs =
2422 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
2423 struct reftable_backend *be;
2424 struct write_reflog_delete_arg arg = {
2425 .refname = refname,
2426 };
2427 int ret;
2428
2429 ret = backend_for(&be, refs, refname, &refname, 1);
2430 if (ret)
2431 return ret;
2432 arg.stack = be->stack;
2433
2434 ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);
2435
2436 assert(ret != REFTABLE_API_ERROR);
2437 return ret;
2438 }
2439
2440 struct reflog_expiry_arg {
2441 struct reftable_ref_store *refs;
2442 struct reftable_stack *stack;
2443 struct reftable_log_record *records;
2444 struct object_id update_oid;
2445 const char *refname;
2446 size_t len;
2447 };
2448
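/*
 * Writer callback for reflog expiry: it stages the rewritten log records
 * computed by reftable_be_reflog_expire(), an updated ref value when one
 * was requested, and a fresh existence marker when no live entries remain.
 */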
2449 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2450 {
2451 struct reflog_expiry_arg *arg = cb_data;
2452 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2453 uint64_t live_records = 0;
2454 size_t i;
2455 int ret;
2456
2457 for (i = 0; i < arg->len; i++)
2458 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2459 live_records++;
2460
2461 ret = reftable_writer_set_limits(writer, ts, ts);
2462 if (ret < 0)
2463 return ret;
2464
2465 if (!is_null_oid(&arg->update_oid)) {
2466 struct reftable_ref_record ref = {0};
2467 struct object_id peeled;
2468
2469 ref.refname = (char *)arg->refname;
2470 ref.update_index = ts;
2471
2472 if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled)) {
2473 ref.value_type = REFTABLE_REF_VAL2;
2474 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2475 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2476 } else {
2477 ref.value_type = REFTABLE_REF_VAL1;
2478 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2479 }
2480
2481 ret = reftable_writer_add_ref(writer, &ref);
2482 if (ret < 0)
2483 return ret;
2484 }
2485
2486 /*
2487 * When expiry leaves no live entries in the reflog we still write a
2488 * placeholder reflog entry that indicates that the reflog continues
2489 * to exist.
2490 */
2491 if (!live_records) {
2492 struct reftable_log_record log = {
2493 .refname = (char *)arg->refname,
2494 .value_type = REFTABLE_LOG_UPDATE,
2495 .update_index = ts,
2496 };
2497
2498 ret = reftable_writer_add_log(writer, &log);
2499 if (ret)
2500 return ret;
2501 }
2502
2503 for (i = 0; i < arg->len; i++) {
2504 ret = reftable_writer_add_log(writer, &arg->records[i]);
2505 if (ret)
2506 return ret;
2507 }
2508
2509 return 0;
2510 }
2511
2512 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2513 const char *refname,
2514 unsigned int flags,
2515 reflog_expiry_prepare_fn prepare_fn,
2516 reflog_expiry_should_prune_fn should_prune_fn,
2517 reflog_expiry_cleanup_fn cleanup_fn,
2518 void *policy_cb_data)
2519 {
2520 /*
2521 * For log expiry, we write tombstones for every single reflog entry
2522 * that is to be expired. This means that the entries are still
2523 * retrievable by delving into the stack, and expiring entries
2524 * paradoxically takes extra disk space. That space is only reclaimed
2525 * when the reftable stack is compacted.
2526 *
2527 * It would be better if the refs backend supported an API that sets a
2528 * criterion for all refs, passing the criterion to pack_refs().
2529 *
2530 * On the plus side, because we do the expiration per ref, we can easily
2531 * insert the reflog existence dummies.
2532 */
2533 struct reftable_ref_store *refs =
2534 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2535 struct reftable_log_record *logs = NULL;
2536 struct reftable_log_record *rewritten = NULL;
2537 struct reftable_iterator it = {0};
2538 struct reftable_addition *add = NULL;
2539 struct reflog_expiry_arg arg = {0};
2540 struct reftable_backend *be;
2541 struct object_id oid = {0};
2542 struct strbuf referent = STRBUF_INIT;
2543 uint8_t *last_hash = NULL;
2544 size_t logs_nr = 0, logs_alloc = 0, i;
2545 unsigned int type = 0;
2546 int ret;
2547
2548 if (refs->err < 0)
2549 return refs->err;
2550
2551 ret = backend_for(&be, refs, refname, &refname, 1);
2552 if (ret < 0)
2553 goto done;
2554
2555 ret = reftable_stack_init_log_iterator(be->stack, &it);
2556 if (ret < 0)
2557 goto done;
2558
2559 ret = reftable_iterator_seek_log(&it, refname);
2560 if (ret < 0)
2561 goto done;
2562
2563 ret = reftable_stack_new_addition(&add, be->stack, 0);
2564 if (ret < 0)
2565 goto done;
2566
2567 ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
2568 if (ret < 0)
2569 goto done;
2570 prepare_fn(refname, &oid, policy_cb_data);
2571
2572 while (1) {
2573 struct reftable_log_record log = {0};
2574 struct object_id old_oid, new_oid;
2575
2576 ret = reftable_iterator_next_log(&it, &log);
2577 if (ret < 0)
2578 goto done;
2579 if (ret > 0 || strcmp(log.refname, refname)) {
2580 reftable_log_record_release(&log);
2581 break;
2582 }
2583
2584 oidread(&old_oid, log.value.update.old_hash,
2585 ref_store->repo->hash_algo);
2586 oidread(&new_oid, log.value.update.new_hash,
2587 ref_store->repo->hash_algo);
2588
2589 /*
2590 * Skip over the reflog existence marker. We will add it back
2591 * in when there are no live reflog records.
2592 */
2593 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2594 reftable_log_record_release(&log);
2595 continue;
2596 }
2597
2598 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2599 logs[logs_nr++] = log;
2600 }
2601
2602 /*
2603 * We need to rewrite all reflog entries according to the pruning
2604 * callback function:
2605 *
2606 * - If a reflog entry shall be pruned we mark the record for
2607 * deletion.
2608 *
2609 * - Otherwise we may have to rewrite the chain of reflog entries so
2610 * that gaps created by just-deleted records get backfilled.
2611 */
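/*
 * For instance, given the entries A->B, B->C and C->D (oldest to newest),
 * pruning B->C with EXPIRE_REFLOGS_REWRITE set rewrites the remaining
 * newer entry to B->D so that the surviving chain has no gap.
 */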
2612 CALLOC_ARRAY(rewritten, logs_nr);
2613 for (i = logs_nr; i--;) {
2614 struct reftable_log_record *dest = &rewritten[i];
2615 struct object_id old_oid, new_oid;
2616
2617 *dest = logs[i];
2618 oidread(&old_oid, logs[i].value.update.old_hash,
2619 ref_store->repo->hash_algo);
2620 oidread(&new_oid, logs[i].value.update.new_hash,
2621 ref_store->repo->hash_algo);
2622
2623 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2624 (timestamp_t)logs[i].value.update.time,
2625 logs[i].value.update.tz_offset,
2626 logs[i].value.update.message,
2627 policy_cb_data)) {
2628 dest->value_type = REFTABLE_LOG_DELETION;
2629 } else {
2630 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2631 memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
2632 last_hash = logs[i].value.update.new_hash;
2633 }
2634 }
2635
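/*
 * With EXPIRE_REFLOGS_UPDATE_REF the ref itself is rewritten to the value
 * of the newest surviving reflog entry.
 */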
2636 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
2637 oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);
2638
2639 arg.refs = refs;
2640 arg.records = rewritten;
2641 arg.len = logs_nr;
2642 arg.stack = be->stack;
2643 arg.refname = refname;
2644
2645 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2646 if (ret < 0)
2647 goto done;
2648
2649 /*
2650 * Future improvement: we could skip writing records that were
2651 * not changed.
2652 */
2653 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2654 ret = reftable_addition_commit(add);
2655
2656 done:
2657 if (add)
2658 cleanup_fn(policy_cb_data);
2659 assert(ret != REFTABLE_API_ERROR);
2660
2661 reftable_iterator_destroy(&it);
2662 reftable_addition_destroy(add);
2663 for (i = 0; i < logs_nr; i++)
2664 reftable_log_record_release(&logs[i]);
2665 strbuf_release(&referent);
2666 free(logs);
2667 free(rewritten);
2668 return ret;
2669 }
2670
2671 static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
2672 struct fsck_options *o UNUSED,
2673 struct worktree *wt UNUSED)
2674 {
2675 return 0;
2676 }
2677
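/*
 * This vtable is what the generic refs layer dispatches to once a
 * repository selects the backend, e.g. via `git init --ref-format=reftable`
 * or the `extensions.refStorage = reftable` repository extension. Callers
 * never invoke the functions above directly but go through the refs API,
 * roughly along these lines (sketch only, error handling omitted and
 * `my_ent_fn`/`cb_data` being caller-provided placeholders):
 *
 *     struct ref_store *refs = get_main_ref_store(the_repository);
 *     refs_for_each_reflog_ent(refs, "HEAD", my_ent_fn, &cb_data);
 *
 * which ends up in reftable_be_for_each_reflog_ent() for this backend.
 */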
2678 struct ref_storage_be refs_be_reftable = {
2679 .name = "reftable",
2680 .init = reftable_be_init,
2681 .release = reftable_be_release,
2682 .create_on_disk = reftable_be_create_on_disk,
2683 .remove_on_disk = reftable_be_remove_on_disk,
2684
2685 .transaction_prepare = reftable_be_transaction_prepare,
2686 .transaction_finish = reftable_be_transaction_finish,
2687 .transaction_abort = reftable_be_transaction_abort,
2688
2689 .pack_refs = reftable_be_pack_refs,
2690 .rename_ref = reftable_be_rename_ref,
2691 .copy_ref = reftable_be_copy_ref,
2692
2693 .iterator_begin = reftable_be_iterator_begin,
2694 .read_raw_ref = reftable_be_read_raw_ref,
2695 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2696
2697 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2698 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2699 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2700 .reflog_exists = reftable_be_reflog_exists,
2701 .create_reflog = reftable_be_create_reflog,
2702 .delete_reflog = reftable_be_delete_reflog,
2703 .reflog_expire = reftable_be_reflog_expire,
2704
2705 .fsck = reftable_be_fsck,
2706 };