refs/reftable-backend.c
1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../config.h"
5 #include "../environment.h"
6 #include "../gettext.h"
7 #include "../hash.h"
8 #include "../hex.h"
9 #include "../iterator.h"
10 #include "../ident.h"
11 #include "../lockfile.h"
12 #include "../object.h"
13 #include "../path.h"
14 #include "../refs.h"
15 #include "../reftable/reftable-stack.h"
16 #include "../reftable/reftable-record.h"
17 #include "../reftable/reftable-error.h"
18 #include "../reftable/reftable-iterator.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "parse.h"
22 #include "refs-internal.h"
23
24 /*
25 * Used as a flag in ref_update::flags when the ref_update was via an
26 * update to HEAD.
27 */
28 #define REF_UPDATE_VIA_HEAD (1 << 8)
29
30 struct reftable_ref_store {
31 struct ref_store base;
32
33 /*
34 * The main stack refers to the common dir and thus contains common
35 * refs as well as refs of the main repository.
36 */
37 struct reftable_stack *main_stack;
38 /*
39 * The worktree stack refers to the gitdir in case the refdb is opened
40 * via a worktree. It thus contains the per-worktree refs.
41 */
42 struct reftable_stack *worktree_stack;
43 /*
44 * Map of worktree stacks by their respective worktree names. The map
45 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
46 */
47 struct strmap worktree_stacks;
48 struct reftable_write_options write_options;
49
50 unsigned int store_flags;
51 int err;
52 };
53
54 /*
55 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
56 * reftable_ref_store. required_flags is compared with ref_store's store_flags
57 * to ensure the ref_store has all required capabilities. "caller" is used in
58 * any necessary error messages.
59 */
60 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
61 unsigned int required_flags,
62 const char *caller)
63 {
64 struct reftable_ref_store *refs;
65
66 if (ref_store->be != &refs_be_reftable)
67 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
68 ref_store->be->name, caller);
69
70 refs = (struct reftable_ref_store *)ref_store;
71
72 if ((refs->store_flags & required_flags) != required_flags)
73 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
74 caller, required_flags, refs->store_flags);
75
76 return refs;
77 }
78
79 /*
80 * Some refs are global to the repository (refs/heads/{*}), while others are
81 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
82 * multiple separate databases (ie. multiple reftable/ directories), one for
83 * the shared refs, one for the current worktree refs, and one for each
84 * additional worktree. For reading, we merge the view of both the shared and
85 * the current worktree's refs, when necessary.
86 *
87 * This function also optionally assigns the rewritten reference name that is
88 * local to the stack. This translation is required when using worktree refs
89 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
90 * those references in their normalized form.
91 */
92 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
93 const char *refname,
94 const char **rewritten_ref)
95 {
96 const char *wtname;
97 int wtname_len;
98
99 if (!refname)
100 return store->main_stack;
101
102 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
103 case REF_WORKTREE_OTHER: {
104 static struct strbuf wtname_buf = STRBUF_INIT;
105 struct strbuf wt_dir = STRBUF_INIT;
106 struct reftable_stack *stack;
107
108 /*
109 * We're using a static buffer here so that we don't need to
110 * allocate the worktree name whenever we look up a reference.
111 * This could be avoided if the strmap interface knew how to
112 * handle keys with a length.
113 */
114 strbuf_reset(&wtname_buf);
115 strbuf_add(&wtname_buf, wtname, wtname_len);
116
117 /*
118 * There is an edge case here: when the worktree references the
119 * current worktree, then we set up the stack once via
120 * `worktree_stacks` and once via `worktree_stack`. This is
121 * wasteful, but in the reading case it shouldn't matter. And
122 * in the writing case we would notice that the stack is locked
123 * already and error out when trying to write a reference via
124 * both stacks.
125 */
126 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
127 if (!stack) {
128 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
129 store->base.repo->commondir, wtname_buf.buf);
130
131 store->err = reftable_new_stack(&stack, wt_dir.buf,
132 &store->write_options);
133 assert(store->err != REFTABLE_API_ERROR);
134 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
135 }
136
137 strbuf_release(&wt_dir);
138 return stack;
139 }
140 case REF_WORKTREE_CURRENT:
141 /*
142 * If there is no worktree stack then we're currently in the
143 * main worktree. We thus return the main stack in that case.
144 */
145 if (!store->worktree_stack)
146 return store->main_stack;
147 return store->worktree_stack;
148 case REF_WORKTREE_MAIN:
149 case REF_WORKTREE_SHARED:
150 return store->main_stack;
151 default:
152 BUG("unhandled worktree reference type");
153 }
154 }
155
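/*
 * Decide whether a reflog entry should be written for the given ref.
 * This mirrors the `core.logAllRefUpdates` semantics of the files
 * backend: depending on the setting we log never (unless a reflog
 * already exists), always, or only for refs that qualify for reflog
 * auto-creation.
 */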
156 static int should_write_log(struct ref_store *refs, const char *refname)
157 {
158 if (log_all_ref_updates == LOG_REFS_UNSET)
159 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
160
161 switch (log_all_ref_updates) {
162 case LOG_REFS_NONE:
163 return refs_reflog_exists(refs, refname);
164 case LOG_REFS_ALWAYS:
165 return 1;
166 case LOG_REFS_NORMAL:
167 if (should_autocreate_reflog(refname))
168 return 1;
169 return refs_reflog_exists(refs, refname);
170 default:
171 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
172 }
173 }
174
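/*
 * Fill in the committer name, email, timestamp and timezone offset of a
 * reftable log record from an already-split ident line. The refname,
 * update index, object IDs and message are left for the caller to set.
 */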
175 static void fill_reftable_log_record(struct reftable_log_record *log, const struct ident_split *split)
176 {
177 const char *tz_begin;
178 int sign = 1;
179
180 reftable_log_record_release(log);
181 log->value_type = REFTABLE_LOG_UPDATE;
182 log->value.update.name =
183 xstrndup(split->name_begin, split->name_end - split->name_begin);
184 log->value.update.email =
185 xstrndup(split->mail_begin, split->mail_end - split->mail_begin);
186 log->value.update.time = atol(split->date_begin);
187
188 tz_begin = split->tz_begin;
189 if (*tz_begin == '-') {
190 sign = -1;
191 tz_begin++;
192 }
193 if (*tz_begin == '+') {
194 sign = 1;
195 tz_begin++;
196 }
197
198 log->value.update.tz_offset = sign * atoi(tz_begin);
199 }
200
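/*
 * Read a single reference from the given stack without reloading it from
 * disk first. Symref targets are returned via `referent` with
 * REF_ISSYMREF set in `type`; direct refs are returned via `oid`.
 * Returns 0 on success, a positive value if the reference does not
 * exist, and a negative reftable error code otherwise.
 */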
201 static int read_ref_without_reload(struct reftable_stack *stack,
202 const char *refname,
203 struct object_id *oid,
204 struct strbuf *referent,
205 unsigned int *type)
206 {
207 struct reftable_ref_record ref = {0};
208 int ret;
209
210 ret = reftable_stack_read_ref(stack, refname, &ref);
211 if (ret)
212 goto done;
213
214 if (ref.value_type == REFTABLE_REF_SYMREF) {
215 strbuf_reset(referent);
216 strbuf_addstr(referent, ref.value.symref);
217 *type |= REF_ISSYMREF;
218 } else if (reftable_ref_record_val1(&ref)) {
219 oidread(oid, reftable_ref_record_val1(&ref));
220 } else {
221 /* We got a tombstone, which should not happen. */
222 BUG("unhandled reference value type %d", ref.value_type);
223 }
224
225 done:
226 assert(ret != REFTABLE_API_ERROR);
227 reftable_ref_record_release(&ref);
228 return ret;
229 }
230
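/*
 * Parse the "reftable.*" configuration keys and translate them into
 * reftable write options, e.g.:
 *
 *   [reftable]
 *       blockSize = 8192
 *       restartInterval = 16
 *       indexObjects = true
 *       geometricFactor = 2
 */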
231 static int reftable_be_config(const char *var, const char *value,
232 const struct config_context *ctx,
233 void *_opts)
234 {
235 struct reftable_write_options *opts = _opts;
236
237 if (!strcmp(var, "reftable.blocksize")) {
238 unsigned long block_size = git_config_ulong(var, value, ctx->kvi);
239 if (block_size > 16777215)
240 die("reftable block size cannot exceed 16MB");
241 opts->block_size = block_size;
242 } else if (!strcmp(var, "reftable.restartinterval")) {
243 unsigned long restart_interval = git_config_ulong(var, value, ctx->kvi);
244 if (restart_interval > UINT16_MAX)
245 die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX);
246 opts->restart_interval = restart_interval;
247 } else if (!strcmp(var, "reftable.indexobjects")) {
248 opts->skip_index_objects = !git_config_bool(var, value);
249 } else if (!strcmp(var, "reftable.geometricfactor")) {
250 unsigned long factor = git_config_ulong(var, value, ctx->kvi);
251 if (factor > UINT8_MAX)
252 die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX);
253 opts->auto_compaction_factor = factor;
254 }
255
256 return 0;
257 }
258
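/*
 * Initialize the reftable ref store: derive the write options from the
 * environment and config, open the main stack in the common git
 * directory and, when in a worktree, the per-worktree stack in the
 * worktree's gitdir.
 */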
259 static struct ref_store *reftable_be_init(struct repository *repo,
260 const char *gitdir,
261 unsigned int store_flags)
262 {
263 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
264 struct strbuf path = STRBUF_INIT;
265 int is_worktree;
266 mode_t mask;
267
268 mask = umask(0);
269 umask(mask);
270
271 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
272 strmap_init(&refs->worktree_stacks);
273 refs->store_flags = store_flags;
274
275 refs->write_options.hash_id = repo->hash_algo->format_id;
276 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
277 refs->write_options.disable_auto_compact =
278 !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
279
280 git_config(reftable_be_config, &refs->write_options);
281
282 /*
283 * It is somewhat unfortunate that we have to mirror the default block
284 * size of the reftable library here. But given that the write options
285 * wouldn't be updated by the library here, and given that we require
286 * the proper block size to trim reflog messages so that they fit, we
287 * must set up a proper value here.
288 */
289 if (!refs->write_options.block_size)
290 refs->write_options.block_size = 4096;
291
292 /*
293 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
294 * This stack contains both the shared and the main worktree refs.
295 *
296 * Note that we don't try to resolve the path in case we have a
297 * worktree because `get_common_dir_noenv()` already does it for us.
298 */
299 is_worktree = get_common_dir_noenv(&path, gitdir);
300 if (!is_worktree) {
301 strbuf_reset(&path);
302 strbuf_realpath(&path, gitdir, 0);
303 }
304 strbuf_addstr(&path, "/reftable");
305 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
306 &refs->write_options);
307 if (refs->err)
308 goto done;
309
310 /*
311 * If we're in a worktree we also need to set up the worktree reftable
312 * stack that is contained in the per-worktree GIT_DIR.
313 *
314 * Ideally, we would also add the stack to our worktree stack map. But
315 * we have no way to figure out the worktree name here and thus can't
316 * do it efficiently.
317 */
318 if (is_worktree) {
319 strbuf_reset(&path);
320 strbuf_addf(&path, "%s/reftable", gitdir);
321
322 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
323 &refs->write_options);
324 if (refs->err)
325 goto done;
326 }
327
328 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
329
330 done:
331 assert(refs->err != REFTABLE_API_ERROR);
332 strbuf_release(&path);
333 return &refs->base;
334 }
335
336 static void reftable_be_release(struct ref_store *ref_store)
337 {
338 struct reftable_ref_store *refs = reftable_be_downcast(ref_store, 0, "release");
339 struct strmap_entry *entry;
340 struct hashmap_iter iter;
341
342 if (refs->main_stack) {
343 reftable_stack_destroy(refs->main_stack);
344 refs->main_stack = NULL;
345 }
346
347 if (refs->worktree_stack) {
348 reftable_stack_destroy(refs->worktree_stack);
349 refs->worktree_stack = NULL;
350 }
351
352 strmap_for_each_entry(&refs->worktree_stacks, &iter, entry)
353 reftable_stack_destroy(entry->value);
354 strmap_clear(&refs->worktree_stacks, 0);
355 }
356
357 static int reftable_be_create_on_disk(struct ref_store *ref_store,
358 int flags UNUSED,
359 struct strbuf *err UNUSED)
360 {
361 struct reftable_ref_store *refs =
362 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create");
363 struct strbuf sb = STRBUF_INIT;
364
365 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
366 safe_create_dir(sb.buf, 1);
367 strbuf_reset(&sb);
368
369 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
370 write_file(sb.buf, "ref: refs/heads/.invalid");
371 adjust_shared_perm(sb.buf);
372 strbuf_reset(&sb);
373
374 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
375 safe_create_dir(sb.buf, 1);
376 strbuf_reset(&sb);
377
378 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
379 write_file(sb.buf, "this repository uses the reftable format");
380 adjust_shared_perm(sb.buf);
381
382 strbuf_release(&sb);
383 return 0;
384 }
385
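/*
 * Iterator over the references of a single reftable stack. The raw
 * records are filtered in the advance callback so that the iterator
 * exposes the same behaviour as its counterpart in the files backend.
 */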
386 struct reftable_ref_iterator {
387 struct ref_iterator base;
388 struct reftable_ref_store *refs;
389 struct reftable_iterator iter;
390 struct reftable_ref_record ref;
391 struct object_id oid;
392
393 const char *prefix;
394 size_t prefix_len;
395 unsigned int flags;
396 int err;
397 };
398
399 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
400 {
401 struct reftable_ref_iterator *iter =
402 (struct reftable_ref_iterator *)ref_iterator;
403 struct reftable_ref_store *refs = iter->refs;
404
405 while (!iter->err) {
406 int flags = 0;
407
408 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
409 if (iter->err)
410 break;
411
412 /*
413 * The files backend only lists references contained in "refs/" unless
414 * the root refs are to be included. We emulate the same behaviour here.
415 */
416 if (!starts_with(iter->ref.refname, "refs/") &&
417 !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
418 is_root_ref(iter->ref.refname))) {
419 continue;
420 }
421
422 if (iter->prefix_len &&
423 strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
424 iter->err = 1;
425 break;
426 }
427
428 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
429 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
430 REF_WORKTREE_CURRENT)
431 continue;
432
433 switch (iter->ref.value_type) {
434 case REFTABLE_REF_VAL1:
435 oidread(&iter->oid, iter->ref.value.val1);
436 break;
437 case REFTABLE_REF_VAL2:
438 oidread(&iter->oid, iter->ref.value.val2.value);
439 break;
440 case REFTABLE_REF_SYMREF:
441 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
442 RESOLVE_REF_READING, &iter->oid, &flags))
443 oidclr(&iter->oid);
444 break;
445 default:
446 BUG("unhandled reference value type %d", iter->ref.value_type);
447 }
448
449 if (is_null_oid(&iter->oid))
450 flags |= REF_ISBROKEN;
451
452 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
453 if (!refname_is_safe(iter->ref.refname))
454 die(_("refname is dangerous: %s"), iter->ref.refname);
455 oidclr(&iter->oid);
456 flags |= REF_BAD_NAME | REF_ISBROKEN;
457 }
458
459 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
460 flags & REF_ISSYMREF &&
461 flags & REF_ISBROKEN)
462 continue;
463
464 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
465 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
466 &iter->oid, flags))
467 continue;
468
469 iter->base.refname = iter->ref.refname;
470 iter->base.oid = &iter->oid;
471 iter->base.flags = flags;
472
473 break;
474 }
475
476 if (iter->err > 0) {
477 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
478 return ITER_ERROR;
479 return ITER_DONE;
480 }
481
482 if (iter->err < 0) {
483 ref_iterator_abort(ref_iterator);
484 return ITER_ERROR;
485 }
486
487 return ITER_OK;
488 }
489
490 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
491 struct object_id *peeled)
492 {
493 struct reftable_ref_iterator *iter =
494 (struct reftable_ref_iterator *)ref_iterator;
495
496 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
497 oidread(peeled, iter->ref.value.val2.target_value);
498 return 0;
499 }
500
501 return -1;
502 }
503
504 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
505 {
506 struct reftable_ref_iterator *iter =
507 (struct reftable_ref_iterator *)ref_iterator;
508 reftable_ref_record_release(&iter->ref);
509 reftable_iterator_destroy(&iter->iter);
510 free(iter);
511 return ITER_DONE;
512 }
513
514 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
515 .advance = reftable_ref_iterator_advance,
516 .peel = reftable_ref_iterator_peel,
517 .abort = reftable_ref_iterator_abort
518 };
519
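/*
 * Set up a reference iterator for the given stack, positioned at the
 * given prefix. Errors that occur while reloading or seeking the stack
 * are stored in the iterator and surfaced when it is advanced.
 */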
520 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
521 struct reftable_stack *stack,
522 const char *prefix,
523 int flags)
524 {
525 struct reftable_ref_iterator *iter;
526 int ret;
527
528 iter = xcalloc(1, sizeof(*iter));
529 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
530 iter->prefix = prefix;
531 iter->prefix_len = prefix ? strlen(prefix) : 0;
532 iter->base.oid = &iter->oid;
533 iter->flags = flags;
534 iter->refs = refs;
535
536 ret = refs->err;
537 if (ret)
538 goto done;
539
540 ret = reftable_stack_reload(stack);
541 if (ret)
542 goto done;
543
544 reftable_stack_init_ref_iterator(stack, &iter->iter);
545 ret = reftable_iterator_seek_ref(&iter->iter, prefix);
546 if (ret)
547 goto done;
548
549 done:
550 iter->err = ret;
551 return iter;
552 }
553
554 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
555 const char *prefix,
556 const char **exclude_patterns,
557 unsigned int flags)
558 {
559 struct reftable_ref_iterator *main_iter, *worktree_iter;
560 struct reftable_ref_store *refs;
561 unsigned int required_flags = REF_STORE_READ;
562
563 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
564 required_flags |= REF_STORE_ODB;
565 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
566
567 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
568
569 /*
570 * The worktree stack is only set when we're in an actual worktree
571 * right now. If we aren't, then we only return the common reftable
572 * iterator.
573 */
574 if (!refs->worktree_stack)
575 return &main_iter->base;
576
577 /*
578 * Otherwise we merge both the common and the per-worktree refs into a
579 * single iterator.
580 */
581 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
582 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
583 ref_iterator_select, NULL);
584 }
585
586 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
587 const char *refname,
588 struct object_id *oid,
589 struct strbuf *referent,
590 unsigned int *type,
591 int *failure_errno)
592 {
593 struct reftable_ref_store *refs =
594 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
595 struct reftable_stack *stack = stack_for(refs, refname, &refname);
596 int ret;
597
598 if (refs->err < 0)
599 return refs->err;
600
601 ret = reftable_stack_reload(stack);
602 if (ret)
603 return ret;
604
605 ret = read_ref_without_reload(stack, refname, oid, referent, type);
606 if (ret < 0)
607 return ret;
608 if (ret > 0) {
609 *failure_errno = ENOENT;
610 return -1;
611 }
612
613 return 0;
614 }
615
616 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
617 const char *refname,
618 struct strbuf *referent)
619 {
620 struct reftable_ref_store *refs =
621 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
622 struct reftable_stack *stack = stack_for(refs, refname, &refname);
623 struct reftable_ref_record ref = {0};
624 int ret;
625
626 ret = reftable_stack_reload(stack);
627 if (ret)
628 return ret;
629
630 ret = reftable_stack_read_ref(stack, refname, &ref);
631 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
632 strbuf_addstr(referent, ref.value.symref);
633 else
634 ret = -1;
635
636 reftable_ref_record_release(&ref);
637 return ret;
638 }
639
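/*
 * A transaction may span multiple worktrees and thus multiple reftable
 * stacks. We thus queue updates per stack, remembering for each update
 * the object ID the reference currently points to.
 */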
640 struct reftable_transaction_update {
641 struct ref_update *update;
642 struct object_id current_oid;
643 };
644
645 struct write_transaction_table_arg {
646 struct reftable_ref_store *refs;
647 struct reftable_stack *stack;
648 struct reftable_addition *addition;
649 struct reftable_transaction_update *updates;
650 size_t updates_nr;
651 size_t updates_alloc;
652 size_t updates_expected;
653 };
654
655 struct reftable_transaction_data {
656 struct write_transaction_table_arg *args;
657 size_t args_nr, args_alloc;
658 };
659
660 static void free_transaction_data(struct reftable_transaction_data *tx_data)
661 {
662 if (!tx_data)
663 return;
664 for (size_t i = 0; i < tx_data->args_nr; i++) {
665 reftable_addition_destroy(tx_data->args[i].addition);
666 free(tx_data->args[i].updates);
667 }
668 free(tx_data->args);
669 free(tx_data);
670 }
671
672 /*
673 * Prepare transaction update for the given reference update. This will cause
674 * us to lock the corresponding reftable stack for concurrent modification.
675 */
676 static int prepare_transaction_update(struct write_transaction_table_arg **out,
677 struct reftable_ref_store *refs,
678 struct reftable_transaction_data *tx_data,
679 struct ref_update *update,
680 struct strbuf *err)
681 {
682 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
683 struct write_transaction_table_arg *arg = NULL;
684 size_t i;
685 int ret;
686
687 /*
688 * Search for a preexisting stack update. If there is one then we add
689 * the update to it, otherwise we set up a new stack update.
690 */
691 for (i = 0; !arg && i < tx_data->args_nr; i++)
692 if (tx_data->args[i].stack == stack)
693 arg = &tx_data->args[i];
694
695 if (!arg) {
696 struct reftable_addition *addition;
697
698 ret = reftable_stack_reload(stack);
699 if (ret)
700 return ret;
701
702 ret = reftable_stack_new_addition(&addition, stack);
703 if (ret) {
704 if (ret == REFTABLE_LOCK_ERROR)
705 strbuf_addstr(err, "cannot lock references");
706 return ret;
707 }
708
709 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
710 tx_data->args_alloc);
711 arg = &tx_data->args[tx_data->args_nr++];
712 arg->refs = refs;
713 arg->stack = stack;
714 arg->addition = addition;
715 arg->updates = NULL;
716 arg->updates_nr = 0;
717 arg->updates_alloc = 0;
718 arg->updates_expected = 0;
719 }
720
721 arg->updates_expected++;
722
723 if (out)
724 *out = arg;
725
726 return 0;
727 }
728
729 /*
730 * Queue a reference update for the correct stack. We potentially need to
731 * handle multiple stack updates in a single transaction when it spans across
732 * multiple worktrees.
733 */
734 static int queue_transaction_update(struct reftable_ref_store *refs,
735 struct reftable_transaction_data *tx_data,
736 struct ref_update *update,
737 struct object_id *current_oid,
738 struct strbuf *err)
739 {
740 struct write_transaction_table_arg *arg = NULL;
741 int ret;
742
743 if (update->backend_data)
744 BUG("reference update queued more than once");
745
746 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
747 if (ret < 0)
748 return ret;
749
750 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
751 arg->updates_alloc);
752 arg->updates[arg->updates_nr].update = update;
753 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
754 update->backend_data = &arg->updates[arg->updates_nr++];
755
756 return 0;
757 }
758
759 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
760 struct ref_transaction *transaction,
761 struct strbuf *err)
762 {
763 struct reftable_ref_store *refs =
764 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
765 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
766 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
767 struct reftable_transaction_data *tx_data = NULL;
768 struct object_id head_oid;
769 unsigned int head_type = 0;
770 size_t i;
771 int ret;
772
773 ret = refs->err;
774 if (ret < 0)
775 goto done;
776
777 tx_data = xcalloc(1, sizeof(*tx_data));
778
779 /*
780 * Preprocess all updates. For one we check that there are no duplicate
781 * reference updates in this transaction. Second, we lock all stacks
782 * that will be modified during the transaction.
783 */
784 for (i = 0; i < transaction->nr; i++) {
785 ret = prepare_transaction_update(NULL, refs, tx_data,
786 transaction->updates[i], err);
787 if (ret)
788 goto done;
789
790 string_list_append(&affected_refnames,
791 transaction->updates[i]->refname);
792 }
793
794 /*
795 * Now that we have counted updates per stack we can preallocate their
796 * arrays. This avoids having to reallocate many times.
797 */
798 for (i = 0; i < tx_data->args_nr; i++) {
799 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
800 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
801 }
802
803 /*
804 * Fail if a refname appears more than once in the transaction.
805 * This code is taken from the files backend and is a good candidate to
806 * be moved into the generic layer.
807 */
808 string_list_sort(&affected_refnames);
809 if (ref_update_reject_duplicates(&affected_refnames, err)) {
810 ret = TRANSACTION_GENERIC_ERROR;
811 goto done;
812 }
813
814 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
815 &head_referent, &head_type);
816 if (ret < 0)
817 goto done;
818 ret = 0;
819
820 for (i = 0; i < transaction->nr; i++) {
821 struct ref_update *u = transaction->updates[i];
822 struct object_id current_oid = {0};
823 struct reftable_stack *stack;
824 const char *rewritten_ref;
825
826 stack = stack_for(refs, u->refname, &rewritten_ref);
827
828 /* Verify that the new object ID is valid. */
829 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
830 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
831 !(u->flags & REF_LOG_ONLY)) {
832 struct object *o = parse_object(refs->base.repo, &u->new_oid);
833 if (!o) {
834 strbuf_addf(err,
835 _("trying to write ref '%s' with nonexistent object %s"),
836 u->refname, oid_to_hex(&u->new_oid));
837 ret = -1;
838 goto done;
839 }
840
841 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
842 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
843 oid_to_hex(&u->new_oid), u->refname);
844 ret = -1;
845 goto done;
846 }
847 }
848
849 /*
850 * When we update the reference that HEAD points to we enqueue
851 * a second log-only update for HEAD so that its reflog is
852 * updated accordingly.
853 */
854 if (head_type == REF_ISSYMREF &&
855 !(u->flags & REF_LOG_ONLY) &&
856 !(u->flags & REF_UPDATE_VIA_HEAD) &&
857 !strcmp(rewritten_ref, head_referent.buf)) {
858 struct ref_update *new_update;
859
860 /*
861 * First make sure that HEAD is not already in the
862 * transaction. This check is O(lg N) in the transaction
863 * size, but it happens at most once per transaction.
864 */
865 if (string_list_has_string(&affected_refnames, "HEAD")) {
866 /* An entry already existed */
867 strbuf_addf(err,
868 _("multiple updates for 'HEAD' (including one "
869 "via its referent '%s') are not allowed"),
870 u->refname);
871 ret = TRANSACTION_NAME_CONFLICT;
872 goto done;
873 }
874
875 new_update = ref_transaction_add_update(
876 transaction, "HEAD",
877 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
878 &u->new_oid, &u->old_oid, NULL, NULL, u->msg);
879 string_list_insert(&affected_refnames, new_update->refname);
880 }
881
882 ret = read_ref_without_reload(stack, rewritten_ref,
883 &current_oid, &referent, &u->type);
884 if (ret < 0)
885 goto done;
886 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
887 /*
888 * The reference does not exist, and we either have no
889 * old object ID or expect the reference to not exist.
890 * We can thus skip below safety checks as well as the
891 * symref splitting. But we do want to verify that
892 * there is no conflicting reference here so that we
893 * can output a proper error message instead of failing
894 * at a later point.
895 */
896 ret = refs_verify_refname_available(ref_store, u->refname,
897 &affected_refnames, NULL, err);
898 if (ret < 0)
899 goto done;
900
901 /*
902 * There is no need to write the reference deletion
903 * when the reference in question doesn't exist.
904 */
905 if ((u->flags & REF_HAVE_NEW) && !ref_update_has_null_new_value(u)) {
906 ret = queue_transaction_update(refs, tx_data, u,
907 &current_oid, err);
908 if (ret)
909 goto done;
910 }
911
912 continue;
913 }
914 if (ret > 0) {
915 /* The reference does not exist, but we expected it to. */
916 strbuf_addf(err, _("cannot lock ref '%s': "
917 "unable to resolve reference '%s'"),
918 ref_update_original_update_refname(u), u->refname);
919 ret = -1;
920 goto done;
921 }
922
923 if (u->type & REF_ISSYMREF) {
924 /*
925 * The reftable stack is locked at this point already,
926 * so it is safe to call `refs_resolve_ref_unsafe()`
927 * here without causing races.
928 */
929 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
930 &current_oid, NULL);
931
932 if (u->flags & REF_NO_DEREF) {
933 if (u->flags & REF_HAVE_OLD && !resolved) {
934 strbuf_addf(err, _("cannot lock ref '%s': "
935 "error reading reference"), u->refname);
936 ret = -1;
937 goto done;
938 }
939 } else {
940 struct ref_update *new_update;
941 int new_flags;
942
943 new_flags = u->flags;
944 if (!strcmp(rewritten_ref, "HEAD"))
945 new_flags |= REF_UPDATE_VIA_HEAD;
946
947 /*
948 * If we are updating a symref (eg. HEAD), we should also
949 * update the branch that the symref points to.
950 *
951 * This is generic functionality, and would be better
952 * done in refs.c, but the current implementation is
953 * intertwined with the locking in files-backend.c.
954 */
955 new_update = ref_transaction_add_update(
956 transaction, referent.buf, new_flags,
957 &u->new_oid, &u->old_oid, u->new_target,
958 u->old_target, u->msg);
959
960 new_update->parent_update = u;
961
962 /*
963 * Change the symbolic ref update to log only. Also, it
964 * doesn't need to check its old OID value, as that will be
965 * done when new_update is processed.
966 */
967 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
968 u->flags &= ~REF_HAVE_OLD;
969
970 if (string_list_has_string(&affected_refnames, new_update->refname)) {
971 strbuf_addf(err,
972 _("multiple updates for '%s' (including one "
973 "via symref '%s') are not allowed"),
974 referent.buf, u->refname);
975 ret = TRANSACTION_NAME_CONFLICT;
976 goto done;
977 }
978 string_list_insert(&affected_refnames, new_update->refname);
979 }
980 }
981
982 /*
983 * Verify that the old object matches our expectations. Note
984 * that the error messages here do not make a lot of sense in
985 * the context of the reftable backend as we never lock
986 * individual refs. But the error messages match what the files
987 * backend returns, which keeps our tests happy.
988 */
989 if (u->old_target) {
990 if (ref_update_check_old_target(referent.buf, u, err)) {
991 ret = -1;
992 goto done;
993 }
994 } else if ((u->flags & REF_HAVE_OLD) && !oideq(&current_oid, &u->old_oid)) {
995 if (is_null_oid(&u->old_oid))
996 strbuf_addf(err, _("cannot lock ref '%s': "
997 "reference already exists"),
998 ref_update_original_update_refname(u));
999 else if (is_null_oid(&current_oid))
1000 strbuf_addf(err, _("cannot lock ref '%s': "
1001 "reference is missing but expected %s"),
1002 ref_update_original_update_refname(u),
1003 oid_to_hex(&u->old_oid));
1004 else
1005 strbuf_addf(err, _("cannot lock ref '%s': "
1006 "is at %s but expected %s"),
1007 ref_update_original_update_refname(u),
1008 oid_to_hex(&current_oid),
1009 oid_to_hex(&u->old_oid));
1010 ret = -1;
1011 goto done;
1012 }
1013
1014 /*
1015 * If all of the following conditions are true:
1016 *
1017 * - We're not about to write a symref.
1018 * - We're not about to write a log-only entry.
1019 * - Old and new object ID are the same.
1020 *
1021 * Then we're essentially doing a no-op update that can be
1022 * skipped. This is not only for the sake of efficiency, but
1023 * also skips writing unneeded reflog entries.
1024 */
1025 if ((u->type & REF_ISSYMREF) ||
1026 (u->flags & REF_LOG_ONLY) ||
1027 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
1028 ret = queue_transaction_update(refs, tx_data, u,
1029 &current_oid, err);
1030 if (ret)
1031 goto done;
1032 }
1033 }
1034
1035 transaction->backend_data = tx_data;
1036 transaction->state = REF_TRANSACTION_PREPARED;
1037
1038 done:
1039 assert(ret != REFTABLE_API_ERROR);
1040 if (ret < 0) {
1041 free_transaction_data(tx_data);
1042 transaction->state = REF_TRANSACTION_CLOSED;
1043 if (!err->len)
1044 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1045 reftable_error_str(ret));
1046 }
1047 string_list_clear(&affected_refnames, 0);
1048 strbuf_release(&referent);
1049 strbuf_release(&head_referent);
1050
1051 return ret;
1052 }
1053
1054 static int reftable_be_transaction_abort(struct ref_store *ref_store,
1055 struct ref_transaction *transaction,
1056 struct strbuf *err)
1057 {
1058 struct reftable_transaction_data *tx_data = transaction->backend_data;
1059 free_transaction_data(tx_data);
1060 transaction->state = REF_TRANSACTION_CLOSED;
1061 return 0;
1062 }
1063
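/*
 * Queued updates are sorted by refname before being written because the
 * reftable writer requires records to be added in lexicographic order.
 */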
1064 static int transaction_update_cmp(const void *a, const void *b)
1065 {
1066 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1067 ((struct reftable_transaction_update *)b)->update->refname);
1068 }
1069
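/*
 * Callback passed to `reftable_addition_add()`: write all updates queued
 * for a single stack into a new table, adding the ref records first and
 * appending the collected log records at the end.
 */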
1070 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1071 {
1072 struct write_transaction_table_arg *arg = cb_data;
1073 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1074 struct reftable_log_record *logs = NULL;
1075 struct ident_split committer_ident = {0};
1076 size_t logs_nr = 0, logs_alloc = 0, i;
1077 const char *committer_info;
1078 int ret = 0;
1079
1080 committer_info = git_committer_info(0);
1081 if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1082 BUG("failed splitting committer info");
1083
1084 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1085
1086 reftable_writer_set_limits(writer, ts, ts);
1087
1088 for (i = 0; i < arg->updates_nr; i++) {
1089 struct reftable_transaction_update *tx_update = &arg->updates[i];
1090 struct ref_update *u = tx_update->update;
1091
1092 /*
1093 * Write a reflog entry when updating a ref to point to
1094 * something new in any of the following cases:
1095 *
1096 * - The reference is about to be deleted. We always want to
1097 * delete the reflog in that case.
1098 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1099 * the reflog entry.
1100 * - `core.logAllRefUpdates` tells us to create the reflog for
1101 * the given ref.
1102 */
1103 if ((u->flags & REF_HAVE_NEW) &&
1104 !(u->type & REF_ISSYMREF) &&
1105 ref_update_has_null_new_value(u)) {
1106 struct reftable_log_record log = {0};
1107 struct reftable_iterator it = {0};
1108
1109 reftable_stack_init_log_iterator(arg->stack, &it);
1110
1111 /*
1112 * When deleting refs we also delete all reflog entries
1113 * with them. While it is not strictly required to
1114 * delete reflogs together with their refs, this
1115 * matches the behaviour of the files backend.
1116 *
1117 * Unfortunately, we have no better way than to delete
1118 * all reflog entries one by one.
1119 */
1120 ret = reftable_iterator_seek_log(&it, u->refname);
1121 while (ret == 0) {
1122 struct reftable_log_record *tombstone;
1123
1124 ret = reftable_iterator_next_log(&it, &log);
1125 if (ret < 0)
1126 break;
1127 if (ret > 0 || strcmp(log.refname, u->refname)) {
1128 ret = 0;
1129 break;
1130 }
1131
1132 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1133 tombstone = &logs[logs_nr++];
1134 tombstone->refname = xstrdup(u->refname);
1135 tombstone->value_type = REFTABLE_LOG_DELETION;
1136 tombstone->update_index = log.update_index;
1137 }
1138
1139 reftable_log_record_release(&log);
1140 reftable_iterator_destroy(&it);
1141
1142 if (ret)
1143 goto done;
1144 } else if (u->flags & REF_HAVE_NEW &&
1145 (u->flags & REF_FORCE_CREATE_REFLOG ||
1146 should_write_log(&arg->refs->base, u->refname))) {
1147 struct reftable_log_record *log;
1148 int create_reflog = 1;
1149
1150 if (u->new_target) {
1151 if (!refs_resolve_ref_unsafe(&arg->refs->base, u->new_target,
1152 RESOLVE_REF_READING, &u->new_oid, NULL)) {
1153 /*
1154 * TODO: currently we skip creating reflogs for dangling
1155 * symref updates. It would be nice to capture this as
1156 * zero oid updates however.
1157 */
1158 create_reflog = 0;
1159 }
1160 }
1161
1162 if (create_reflog) {
1163 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1164 log = &logs[logs_nr++];
1165 memset(log, 0, sizeof(*log));
1166
1167 fill_reftable_log_record(log, &committer_ident);
1168 log->update_index = ts;
1169 log->refname = xstrdup(u->refname);
1170 memcpy(log->value.update.new_hash,
1171 u->new_oid.hash, GIT_MAX_RAWSZ);
1172 memcpy(log->value.update.old_hash,
1173 tx_update->current_oid.hash, GIT_MAX_RAWSZ);
1174 log->value.update.message =
1175 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1176 }
1177 }
1178
1179 if (u->flags & REF_LOG_ONLY)
1180 continue;
1181
1182 if (u->new_target) {
1183 struct reftable_ref_record ref = {
1184 .refname = (char *)u->refname,
1185 .value_type = REFTABLE_REF_SYMREF,
1186 .value.symref = (char *)u->new_target,
1187 .update_index = ts,
1188 };
1189
1190 ret = reftable_writer_add_ref(writer, &ref);
1191 if (ret < 0)
1192 goto done;
1193 } else if ((u->flags & REF_HAVE_NEW) && ref_update_has_null_new_value(u)) {
1194 struct reftable_ref_record ref = {
1195 .refname = (char *)u->refname,
1196 .update_index = ts,
1197 .value_type = REFTABLE_REF_DELETION,
1198 };
1199
1200 ret = reftable_writer_add_ref(writer, &ref);
1201 if (ret < 0)
1202 goto done;
1203 } else if (u->flags & REF_HAVE_NEW) {
1204 struct reftable_ref_record ref = {0};
1205 struct object_id peeled;
1206 int peel_error;
1207
1208 ref.refname = (char *)u->refname;
1209 ref.update_index = ts;
1210
1211 peel_error = peel_object(arg->refs->base.repo, &u->new_oid, &peeled);
1212 if (!peel_error) {
1213 ref.value_type = REFTABLE_REF_VAL2;
1214 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1215 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1216 } else if (!is_null_oid(&u->new_oid)) {
1217 ref.value_type = REFTABLE_REF_VAL1;
1218 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1219 }
1220
1221 ret = reftable_writer_add_ref(writer, &ref);
1222 if (ret < 0)
1223 goto done;
1224 }
1225 }
1226
1227 /*
1228 * Logs are written at the end so that we do not have intermixed ref
1229 * and log blocks.
1230 */
1231 if (logs) {
1232 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1233 if (ret < 0)
1234 goto done;
1235 }
1236
1237 done:
1238 assert(ret != REFTABLE_API_ERROR);
1239 for (i = 0; i < logs_nr; i++)
1240 reftable_log_record_release(&logs[i]);
1241 free(logs);
1242 return ret;
1243 }
1244
1245 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1246 struct ref_transaction *transaction,
1247 struct strbuf *err)
1248 {
1249 struct reftable_transaction_data *tx_data = transaction->backend_data;
1250 int ret = 0;
1251
1252 for (size_t i = 0; i < tx_data->args_nr; i++) {
1253 ret = reftable_addition_add(tx_data->args[i].addition,
1254 write_transaction_table, &tx_data->args[i]);
1255 if (ret < 0)
1256 goto done;
1257
1258 ret = reftable_addition_commit(tx_data->args[i].addition);
1259 if (ret < 0)
1260 goto done;
1261 }
1262
1263 done:
1264 assert(ret != REFTABLE_API_ERROR);
1265 free_transaction_data(tx_data);
1266 transaction->state = REF_TRANSACTION_CLOSED;
1267
1268 if (ret) {
1269 strbuf_addf(err, _("reftable: transaction failure: %s"),
1270 reftable_error_str(ret));
1271 return -1;
1272 }
1273 return ret;
1274 }
1275
1276 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1277 struct ref_transaction *transaction,
1278 struct strbuf *err)
1279 {
1280 return ref_transaction_commit(transaction, err);
1281 }
1282
1283 static int reftable_be_pack_refs(struct ref_store *ref_store,
1284 struct pack_refs_opts *opts)
1285 {
1286 struct reftable_ref_store *refs =
1287 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1288 struct reftable_stack *stack;
1289 int ret;
1290
1291 if (refs->err)
1292 return refs->err;
1293
1294 stack = refs->worktree_stack;
1295 if (!stack)
1296 stack = refs->main_stack;
1297
1298 if (opts->flags & PACK_REFS_AUTO)
1299 ret = reftable_stack_auto_compact(stack);
1300 else
1301 ret = reftable_stack_compact_all(stack, NULL);
1302 if (ret < 0) {
1303 ret = error(_("unable to compact stack: %s"),
1304 reftable_error_str(ret));
1305 goto out;
1306 }
1307
1308 ret = reftable_stack_clean(stack);
1309 if (ret)
1310 goto out;
1311
1312 out:
1313 return ret;
1314 }
1315
1316 struct write_create_symref_arg {
1317 struct reftable_ref_store *refs;
1318 struct reftable_stack *stack;
1319 struct strbuf *err;
1320 const char *refname;
1321 const char *target;
1322 const char *logmsg;
1323 };
1324
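/*
 * Arguments for `write_copy_table()`, which implements both copying and
 * renaming of references: a rename is a copy with `delete_old` set.
 */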
1325 struct write_copy_arg {
1326 struct reftable_ref_store *refs;
1327 struct reftable_stack *stack;
1328 const char *oldname;
1329 const char *newname;
1330 const char *logmsg;
1331 int delete_old;
1332 };
1333
1334 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1335 {
1336 struct write_copy_arg *arg = cb_data;
1337 uint64_t deletion_ts, creation_ts;
1338 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1339 struct reftable_log_record old_log = {0}, *logs = NULL;
1340 struct reftable_iterator it = {0};
1341 struct string_list skip = STRING_LIST_INIT_NODUP;
1342 struct ident_split committer_ident = {0};
1343 struct strbuf errbuf = STRBUF_INIT;
1344 size_t logs_nr = 0, logs_alloc = 0, i;
1345 const char *committer_info;
1346 int ret;
1347
1348 committer_info = git_committer_info(0);
1349 if (split_ident_line(&committer_ident, committer_info, strlen(committer_info)))
1350 BUG("failed splitting committer info");
1351
1352 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1353 ret = error(_("refname %s not found"), arg->oldname);
1354 goto done;
1355 }
1356 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1357 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1358 arg->oldname);
1359 goto done;
1360 }
1361
1362 /*
1363 * There's nothing to do in case the old and new name are the same, so
1364 * we exit early in that case.
1365 */
1366 if (!strcmp(arg->oldname, arg->newname)) {
1367 ret = 0;
1368 goto done;
1369 }
1370
1371 /*
1372 * Verify that the new refname is available.
1373 */
1374 if (arg->delete_old)
1375 string_list_insert(&skip, arg->oldname);
1376 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1377 NULL, &skip, &errbuf);
1378 if (ret < 0) {
1379 error("%s", errbuf.buf);
1380 goto done;
1381 }
1382
1383 /*
1384 * When deleting the old reference we have to use two update indices:
1385 * once to delete the old ref and its reflog, and once to create the
1386 * new ref and its reflog. They need to be staged with two separate
1387 * indices because the new reflog needs to encode both the deletion of
1388 * the old branch and the creation of the new branch, and we cannot do
1389 * two changes to a reflog in a single update.
1390 */
1391 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1392 if (arg->delete_old)
1393 creation_ts++;
1394 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1395
1396 /*
1397 * Add the new reference. If this is a rename then we also delete the
1398 * old reference.
1399 */
1400 refs[0] = old_ref;
1401 refs[0].refname = (char *)arg->newname;
1402 refs[0].update_index = creation_ts;
1403 if (arg->delete_old) {
1404 refs[1].refname = (char *)arg->oldname;
1405 refs[1].value_type = REFTABLE_REF_DELETION;
1406 refs[1].update_index = deletion_ts;
1407 }
1408 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1409 if (ret < 0)
1410 goto done;
1411
1412 /*
1413 * When deleting the old branch we need to create a reflog entry on the
1414 * new branch name that indicates that the old branch has been deleted
1415 * and then recreated. This is a tad weird, but matches what the files
1416 * backend does.
1417 */
1418 if (arg->delete_old) {
1419 struct strbuf head_referent = STRBUF_INIT;
1420 struct object_id head_oid;
1421 int append_head_reflog;
1422 unsigned head_type = 0;
1423
1424 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1425 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1426 fill_reftable_log_record(&logs[logs_nr], &committer_ident);
1427 logs[logs_nr].refname = (char *)arg->newname;
1428 logs[logs_nr].update_index = deletion_ts;
1429 logs[logs_nr].value.update.message =
1430 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1431 memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1432 logs_nr++;
1433
1434 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1435 if (ret < 0)
1436 goto done;
1437 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1438 strbuf_release(&head_referent);
1439
1440 /*
1441 * The files backend uses `refs_delete_ref()` to delete the old
1442 * branch name, which will append a reflog entry for HEAD in
1443 * case it points to the old branch.
1444 */
1445 if (append_head_reflog) {
1446 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1447 logs[logs_nr] = logs[logs_nr - 1];
1448 logs[logs_nr].refname = "HEAD";
1449 logs_nr++;
1450 }
1451 }
1452
1453 /*
1454 * Create the reflog entry for the newly created branch.
1455 */
1456 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1457 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1458 fill_reftable_log_record(&logs[logs_nr], &committer_ident);
1459 logs[logs_nr].refname = (char *)arg->newname;
1460 logs[logs_nr].update_index = creation_ts;
1461 logs[logs_nr].value.update.message =
1462 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1463 memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1464 logs_nr++;
1465
1466 /*
1467 * In addition to writing the reflog entry for the new branch, we also
1468 * copy over all log entries from the old reflog. Last but not least,
1469 * when renaming we also have to delete all the old reflog entries.
1470 */
1471 reftable_stack_init_log_iterator(arg->stack, &it);
1472 ret = reftable_iterator_seek_log(&it, arg->oldname);
1473 if (ret < 0)
1474 goto done;
1475
1476 while (1) {
1477 ret = reftable_iterator_next_log(&it, &old_log);
1478 if (ret < 0)
1479 goto done;
1480 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1481 ret = 0;
1482 break;
1483 }
1484
1485 free(old_log.refname);
1486
1487 /*
1488 * Copy over the old reflog entry with the new refname.
1489 */
1490 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1491 logs[logs_nr] = old_log;
1492 logs[logs_nr].refname = (char *)arg->newname;
1493 logs_nr++;
1494
1495 /*
1496 * Delete the old reflog entry in case we are renaming.
1497 */
1498 if (arg->delete_old) {
1499 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1500 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1501 logs[logs_nr].refname = (char *)arg->oldname;
1502 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1503 logs[logs_nr].update_index = old_log.update_index;
1504 logs_nr++;
1505 }
1506
1507 /*
1508 * Transfer ownership of the log record we're iterating over to
1509 * the array of log records. Otherwise, the pointers would get
1510 * free'd or reallocated by the iterator.
1511 */
1512 memset(&old_log, 0, sizeof(old_log));
1513 }
1514
1515 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1516 if (ret < 0)
1517 goto done;
1518
1519 done:
1520 assert(ret != REFTABLE_API_ERROR);
1521 reftable_iterator_destroy(&it);
1522 string_list_clear(&skip, 0);
1523 strbuf_release(&errbuf);
1524 for (i = 0; i < logs_nr; i++) {
1525 if (!strcmp(logs[i].refname, "HEAD"))
1526 continue;
1527 logs[i].refname = NULL;
1528 reftable_log_record_release(&logs[i]);
1529 }
1530 free(logs);
1531 reftable_ref_record_release(&old_ref);
1532 reftable_log_record_release(&old_log);
1533 return ret;
1534 }
1535
1536 static int reftable_be_rename_ref(struct ref_store *ref_store,
1537 const char *oldrefname,
1538 const char *newrefname,
1539 const char *logmsg)
1540 {
1541 struct reftable_ref_store *refs =
1542 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1543 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1544 struct write_copy_arg arg = {
1545 .refs = refs,
1546 .stack = stack,
1547 .oldname = oldrefname,
1548 .newname = newrefname,
1549 .logmsg = logmsg,
1550 .delete_old = 1,
1551 };
1552 int ret;
1553
1554 ret = refs->err;
1555 if (ret < 0)
1556 goto done;
1557
1558 ret = reftable_stack_reload(stack);
1559 if (ret)
1560 goto done;
1561 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1562
1563 done:
1564 assert(ret != REFTABLE_API_ERROR);
1565 return ret;
1566 }
1567
1568 static int reftable_be_copy_ref(struct ref_store *ref_store,
1569 const char *oldrefname,
1570 const char *newrefname,
1571 const char *logmsg)
1572 {
1573 struct reftable_ref_store *refs =
1574 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1575 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1576 struct write_copy_arg arg = {
1577 .refs = refs,
1578 .stack = stack,
1579 .oldname = oldrefname,
1580 .newname = newrefname,
1581 .logmsg = logmsg,
1582 };
1583 int ret;
1584
1585 ret = refs->err;
1586 if (ret < 0)
1587 goto done;
1588
1589 ret = reftable_stack_reload(stack);
1590 if (ret)
1591 goto done;
1592 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1593
1594 done:
1595 assert(ret != REFTABLE_API_ERROR);
1596 return ret;
1597 }
1598
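/*
 * Iterator that yields the name of each reference which has at least one
 * reflog entry in the given stack.
 */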
1599 struct reftable_reflog_iterator {
1600 struct ref_iterator base;
1601 struct reftable_ref_store *refs;
1602 struct reftable_iterator iter;
1603 struct reftable_log_record log;
1604 struct strbuf last_name;
1605 int err;
1606 };
1607
1608 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1609 {
1610 struct reftable_reflog_iterator *iter =
1611 (struct reftable_reflog_iterator *)ref_iterator;
1612
1613 while (!iter->err) {
1614 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1615 if (iter->err)
1616 break;
1617
1618 /*
1619 * We want the refnames that we have reflogs for, so we skip if
1620 * we've already produced this name. This could be faster by
1621 * seeking directly to reflog@update_index==0.
1622 */
1623 if (!strcmp(iter->log.refname, iter->last_name.buf))
1624 continue;
1625
1626 if (check_refname_format(iter->log.refname,
1627 REFNAME_ALLOW_ONELEVEL))
1628 continue;
1629
1630 strbuf_reset(&iter->last_name);
1631 strbuf_addstr(&iter->last_name, iter->log.refname);
1632 iter->base.refname = iter->log.refname;
1633
1634 break;
1635 }
1636
1637 if (iter->err > 0) {
1638 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1639 return ITER_ERROR;
1640 return ITER_DONE;
1641 }
1642
1643 if (iter->err < 0) {
1644 ref_iterator_abort(ref_iterator);
1645 return ITER_ERROR;
1646 }
1647
1648 return ITER_OK;
1649 }
1650
1651 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1652 struct object_id *peeled)
1653 {
1654 BUG("reftable reflog iterator cannot be peeled");
1655 return -1;
1656 }
1657
1658 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1659 {
1660 struct reftable_reflog_iterator *iter =
1661 (struct reftable_reflog_iterator *)ref_iterator;
1662 reftable_log_record_release(&iter->log);
1663 reftable_iterator_destroy(&iter->iter);
1664 strbuf_release(&iter->last_name);
1665 free(iter);
1666 return ITER_DONE;
1667 }
1668
1669 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1670 .advance = reftable_reflog_iterator_advance,
1671 .peel = reftable_reflog_iterator_peel,
1672 .abort = reftable_reflog_iterator_abort
1673 };
1674
1675 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1676 struct reftable_stack *stack)
1677 {
1678 struct reftable_reflog_iterator *iter;
1679 int ret;
1680
1681 iter = xcalloc(1, sizeof(*iter));
1682 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
1683 strbuf_init(&iter->last_name, 0);
1684 iter->refs = refs;
1685
1686 ret = refs->err;
1687 if (ret)
1688 goto done;
1689
1690 ret = reftable_stack_reload(stack);
1691 if (ret < 0)
1692 goto done;
1693
1694 reftable_stack_init_log_iterator(stack, &iter->iter);
1695 ret = reftable_iterator_seek_log(&iter->iter, "");
1696 if (ret < 0)
1697 goto done;
1698
1699 done:
1700 iter->err = ret;
1701 return iter;
1702 }
1703
1704 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1705 {
1706 struct reftable_ref_store *refs =
1707 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1708 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1709
1710 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1711 if (!refs->worktree_stack)
1712 return &main_iter->base;
1713
1714 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1715
1716 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
1717 ref_iterator_select, NULL);
1718 }
1719
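/*
 * Convert a reftable log record into the arguments expected by
 * `each_reflog_ent_fn` and invoke the callback. Reflog existence
 * markers, which have both object IDs set to the null object ID, are
 * not passed on to the callback.
 */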
1720 static int yield_log_record(struct reftable_log_record *log,
1721 each_reflog_ent_fn fn,
1722 void *cb_data)
1723 {
1724 struct object_id old_oid, new_oid;
1725 const char *full_committer;
1726
1727 oidread(&old_oid, log->value.update.old_hash);
1728 oidread(&new_oid, log->value.update.new_hash);
1729
1730 /*
1731 * When both the old object ID and the new object ID are null
1732 * then this is the reflog existence marker. The caller must
1733 * not be aware of it.
1734 */
1735 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1736 return 0;
1737
1738 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1739 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1740 return fn(&old_oid, &new_oid, full_committer,
1741 log->value.update.time, log->value.update.tz_offset,
1742 log->value.update.message, cb_data);
1743 }
1744
1745 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1746 const char *refname,
1747 each_reflog_ent_fn fn,
1748 void *cb_data)
1749 {
1750 struct reftable_ref_store *refs =
1751 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1752 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1753 struct reftable_log_record log = {0};
1754 struct reftable_iterator it = {0};
1755 int ret;
1756
1757 if (refs->err < 0)
1758 return refs->err;
1759
1760 reftable_stack_init_log_iterator(stack, &it);
1761 ret = reftable_iterator_seek_log(&it, refname);
1762 while (!ret) {
1763 ret = reftable_iterator_next_log(&it, &log);
1764 if (ret < 0)
1765 break;
1766 if (ret > 0 || strcmp(log.refname, refname)) {
1767 ret = 0;
1768 break;
1769 }
1770
1771 ret = yield_log_record(&log, fn, cb_data);
1772 if (ret)
1773 break;
1774 }
1775
1776 reftable_log_record_release(&log);
1777 reftable_iterator_destroy(&it);
1778 return ret;
1779 }
1780
1781 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1782 const char *refname,
1783 each_reflog_ent_fn fn,
1784 void *cb_data)
1785 {
1786 struct reftable_ref_store *refs =
1787 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1788 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1789 struct reftable_log_record *logs = NULL;
1790 struct reftable_iterator it = {0};
1791 size_t logs_alloc = 0, logs_nr = 0, i;
1792 int ret;
1793
1794 if (refs->err < 0)
1795 return refs->err;
1796
1797 reftable_stack_init_log_iterator(stack, &it);
1798 ret = reftable_iterator_seek_log(&it, refname);
1799 while (!ret) {
1800 struct reftable_log_record log = {0};
1801
1802 ret = reftable_iterator_next_log(&it, &log);
1803 if (ret < 0)
1804 goto done;
1805 if (ret > 0 || strcmp(log.refname, refname)) {
1806 reftable_log_record_release(&log);
1807 ret = 0;
1808 break;
1809 }
1810
1811 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1812 logs[logs_nr++] = log;
1813 }
1814
1815 for (i = logs_nr; i--;) {
1816 ret = yield_log_record(&logs[i], fn, cb_data);
1817 if (ret)
1818 goto done;
1819 }
1820
1821 done:
1822 reftable_iterator_destroy(&it);
1823 for (i = 0; i < logs_nr; i++)
1824 reftable_log_record_release(&logs[i]);
1825 free(logs);
1826 return ret;
1827 }
1828
1829 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1830 const char *refname)
1831 {
1832 struct reftable_ref_store *refs =
1833 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1834 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1835 struct reftable_log_record log = {0};
1836 struct reftable_iterator it = {0};
1837 int ret;
1838
1839 ret = refs->err;
1840 if (ret < 0)
1841 goto done;
1842
1843 ret = reftable_stack_reload(stack);
1844 if (ret < 0)
1845 goto done;
1846
1847 reftable_stack_init_log_iterator(stack, &it);
1848 ret = reftable_iterator_seek_log(&it, refname);
1849 if (ret < 0)
1850 goto done;
1851
1852 /*
1853 * Check whether we get at least one log record for the given ref name.
1854 * If so, the reflog exists, otherwise it doesn't.
1855 */
1856 ret = reftable_iterator_next_log(&it, &log);
1857 if (ret < 0)
1858 goto done;
1859 if (ret > 0) {
1860 ret = 0;
1861 goto done;
1862 }
1863
1864 ret = strcmp(log.refname, refname) == 0;
1865
1866 done:
1867 reftable_iterator_destroy(&it);
1868 reftable_log_record_release(&log);
1869 if (ret < 0)
1870 ret = 0;
1871 return ret;
1872 }
1873
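/*
 * Writer callback state used by reftable_be_create_reflog() to append the
 * reflog existence marker for a single ref.
 */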
1874 struct write_reflog_existence_arg {
1875 struct reftable_ref_store *refs;
1876 const char *refname;
1877 struct reftable_stack *stack;
1878 };
1879
1880 static int write_reflog_existence_table(struct reftable_writer *writer,
1881 void *cb_data)
1882 {
1883 struct write_reflog_existence_arg *arg = cb_data;
1884 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1885 struct reftable_log_record log = {0};
1886 int ret;
1887
1888 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1889 if (ret <= 0)
1890 goto done;
1891
1892 reftable_writer_set_limits(writer, ts, ts);
1893
1894 /*
1895  * The existence entry has both the old and the new object ID set to the
1896  * null object ID. Our iterators know about this marker and will not
1897  * present such entries to their callers.
1898 */
1899 log.refname = xstrdup(arg->refname);
1900 log.update_index = ts;
1901 log.value_type = REFTABLE_LOG_UPDATE;
1902 ret = reftable_writer_add_log(writer, &log);
1903
1904 done:
1905 assert(ret != REFTABLE_API_ERROR);
1906 reftable_log_record_release(&log);
1907 return ret;
1908 }
1909
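/*
 * "Create" a reflog for `refname` by writing its existence marker. No marker
 * is written in case the reflog already has log records.
 */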
1910 static int reftable_be_create_reflog(struct ref_store *ref_store,
1911 const char *refname,
1912 struct strbuf *errmsg)
1913 {
1914 struct reftable_ref_store *refs =
1915 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1916 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1917 struct write_reflog_existence_arg arg = {
1918 .refs = refs,
1919 .stack = stack,
1920 .refname = refname,
1921 };
1922 int ret;
1923
1924 ret = refs->err;
1925 if (ret < 0)
1926 goto done;
1927
1928 ret = reftable_stack_reload(stack);
1929 if (ret)
1930 goto done;
1931
1932 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1933
1934 done:
1935 return ret;
1936 }
1937
1938 struct write_reflog_delete_arg {
1939 struct reftable_stack *stack;
1940 const char *refname;
1941 };
1942
1943 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1944 {
1945 struct write_reflog_delete_arg *arg = cb_data;
1946 struct reftable_log_record log = {0}, tombstone = {0};
1947 struct reftable_iterator it = {0};
1948 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1949 int ret;
1950
1951 reftable_writer_set_limits(writer, ts, ts);
1952
1953 reftable_stack_init_log_iterator(arg->stack, &it);
1954
1955 /*
1956  * In order to delete a reflog we have to write a tombstone for each of
1957  * its log records, one by one. This is inefficient, but the reftable
1958  * format does not have a better deletion marker right now.
1959 */
1960 ret = reftable_iterator_seek_log(&it, arg->refname);
1961 while (ret == 0) {
1962 ret = reftable_iterator_next_log(&it, &log);
1963 if (ret < 0)
1964 break;
1965 if (ret > 0 || strcmp(log.refname, arg->refname)) {
1966 ret = 0;
1967 break;
1968 }
1969
1970 tombstone.refname = (char *)arg->refname;
1971 tombstone.value_type = REFTABLE_LOG_DELETION;
1972 tombstone.update_index = log.update_index;
1973
1974 ret = reftable_writer_add_log(writer, &tombstone);
1975 }
1976
1977 reftable_log_record_release(&log);
1978 reftable_iterator_destroy(&it);
1979 return ret;
1980 }
1981
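/*
 * Delete the reflog of `refname` by appending a table that contains a
 * tombstone for every one of its log records.
 */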
1982 static int reftable_be_delete_reflog(struct ref_store *ref_store,
1983 const char *refname)
1984 {
1985 struct reftable_ref_store *refs =
1986 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1987 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1988 struct write_reflog_delete_arg arg = {
1989 .stack = stack,
1990 .refname = refname,
1991 };
1992 int ret;
1993
1994 ret = reftable_stack_reload(stack);
1995 if (ret)
1996 return ret;
1997 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
1998
1999 assert(ret != REFTABLE_API_ERROR);
2000 return ret;
2001 }
2002
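/*
 * State for write_reflog_expiry_table(): `records` holds the rewritten log
 * records to write back, `len` their number, and `update_oid` the new value
 * of the ref itself in case it is to be updated as part of the expiry (the
 * null object ID otherwise).
 */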
2003 struct reflog_expiry_arg {
2004 struct reftable_ref_store *refs;
2005 struct reftable_stack *stack;
2006 struct reftable_log_record *records;
2007 struct object_id update_oid;
2008 const char *refname;
2009 size_t len;
2010 };
2011
2012 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2013 {
2014 struct reflog_expiry_arg *arg = cb_data;
2015 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2016 uint64_t live_records = 0;
2017 size_t i;
2018 int ret;
2019
2020 for (i = 0; i < arg->len; i++)
2021 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2022 live_records++;
2023
2024 reftable_writer_set_limits(writer, ts, ts);
2025
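/*
 * A non-null update OID means the ref itself shall be rewritten to the tip
 * that results from expiring its reflog entries.
 */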
2026 if (!is_null_oid(&arg->update_oid)) {
2027 struct reftable_ref_record ref = {0};
2028 struct object_id peeled;
2029
2030 ref.refname = (char *)arg->refname;
2031 ref.update_index = ts;
2032
2033 if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled)) {
2034 ref.value_type = REFTABLE_REF_VAL2;
2035 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2036 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2037 } else {
2038 ref.value_type = REFTABLE_REF_VAL1;
2039 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2040 }
2041
2042 ret = reftable_writer_add_ref(writer, &ref);
2043 if (ret < 0)
2044 return ret;
2045 }
2046
2047 /*
2048  * When no live entries remain, the reflog would end up empty. In that
2049  * case we write a placeholder reflog entry whose only purpose is to
2050  * record that the reflog still exists.
2051 */
2052 if (!live_records) {
2053 struct reftable_log_record log = {
2054 .refname = (char *)arg->refname,
2055 .value_type = REFTABLE_LOG_UPDATE,
2056 .update_index = ts,
2057 };
2058
2059 ret = reftable_writer_add_log(writer, &log);
2060 if (ret)
2061 return ret;
2062 }
2063
2064 for (i = 0; i < arg->len; i++) {
2065 ret = reftable_writer_add_log(writer, &arg->records[i]);
2066 if (ret)
2067 return ret;
2068 }
2069
2070 return 0;
2071 }
2072
2073 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2074 const char *refname,
2075 unsigned int flags,
2076 reflog_expiry_prepare_fn prepare_fn,
2077 reflog_expiry_should_prune_fn should_prune_fn,
2078 reflog_expiry_cleanup_fn cleanup_fn,
2079 void *policy_cb_data)
2080 {
2081 /*
2082 * For log expiry, we write tombstones for every single reflog entry
2083 * that is to be expired. This means that the entries are still
2084 * retrievable by delving into the stack, and expiring entries
2085 * paradoxically takes extra memory. This memory is only reclaimed when
2086 * compacting the reftable stack.
2087 *
2088 * It would be better if the refs backend supported an API that sets a
2089 * criterion for all refs, passing the criterion to pack_refs().
2090 *
2091 * On the plus side, because we do the expiration per ref, we can easily
2092 * insert the reflog existence dummies.
2093 */
2094 struct reftable_ref_store *refs =
2095 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2096 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2097 struct reftable_log_record *logs = NULL;
2098 struct reftable_log_record *rewritten = NULL;
2099 struct reftable_ref_record ref_record = {0};
2100 struct reftable_iterator it = {0};
2101 struct reftable_addition *add = NULL;
2102 struct reflog_expiry_arg arg = {0};
2103 struct object_id oid = {0};
2104 uint8_t *last_hash = NULL;
2105 size_t logs_nr = 0, logs_alloc = 0, i;
2106 int ret;
2107
2108 if (refs->err < 0)
2109 return refs->err;
2110
2111 ret = reftable_stack_reload(stack);
2112 if (ret < 0)
2113 goto done;
2114
2115 reftable_stack_init_log_iterator(stack, &it);
2116
2117 ret = reftable_iterator_seek_log(&it, refname);
2118 if (ret < 0)
2119 goto done;
2120
2121 ret = reftable_stack_new_addition(&add, stack);
2122 if (ret < 0)
2123 goto done;
2124
2125 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2126 if (ret < 0)
2127 goto done;
2128 if (reftable_ref_record_val1(&ref_record))
2129 oidread(&oid, reftable_ref_record_val1(&ref_record));
2130 prepare_fn(refname, &oid, policy_cb_data);
2131
2132 while (1) {
2133 struct reftable_log_record log = {0};
2134 struct object_id old_oid, new_oid;
2135
2136 ret = reftable_iterator_next_log(&it, &log);
2137 if (ret < 0)
2138 goto done;
2139 if (ret > 0 || strcmp(log.refname, refname)) {
2140 reftable_log_record_release(&log);
2141 break;
2142 }
2143
2144 oidread(&old_oid, log.value.update.old_hash);
2145 oidread(&new_oid, log.value.update.new_hash);
2146
2147 /*
2148 * Skip over the reflog existence marker. We will add it back
2149 * in when there are no live reflog records.
2150 */
2151 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2152 reftable_log_record_release(&log);
2153 continue;
2154 }
2155
2156 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2157 logs[logs_nr++] = log;
2158 }
2159
2160 /*
2161 * We need to rewrite all reflog entries according to the pruning
2162 * callback function:
2163 *
2164  * - If a reflog entry is to be pruned we mark the record for
2165 * deletion.
2166 *
2167 * - Otherwise we may have to rewrite the chain of reflog entries so
2168 * that gaps created by just-deleted records get backfilled.
2169 */
2170 CALLOC_ARRAY(rewritten, logs_nr);
2171 for (i = logs_nr; i--;) {
2172 struct reftable_log_record *dest = &rewritten[i];
2173 struct object_id old_oid, new_oid;
2174
2175 *dest = logs[i];
2176 oidread(&old_oid, logs[i].value.update.old_hash);
2177 oidread(&new_oid, logs[i].value.update.new_hash);
2178
2179 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2180 (timestamp_t)logs[i].value.update.time,
2181 logs[i].value.update.tz_offset,
2182 logs[i].value.update.message,
2183 policy_cb_data)) {
2184 dest->value_type = REFTABLE_LOG_DELETION;
2185 } else {
2186 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2187 memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
2188 last_hash = logs[i].value.update.new_hash;
2189 }
2190 }
2191
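/*
 * With EXPIRE_REFLOGS_UPDATE_REF, and when the ref currently has a direct
 * value, the ref itself is rewritten to the new object ID of the most recent
 * entry that survived pruning.
 */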
2192 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2193 reftable_ref_record_val1(&ref_record))
2194 oidread(&arg.update_oid, last_hash);
2195
2196 arg.refs = refs;
2197 arg.records = rewritten;
2198 arg.len = logs_nr;
2199 arg.stack = stack;
2200 arg.refname = refname;
2201
2202 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2203 if (ret < 0)
2204 goto done;
2205
2206 /*
2207 * Future improvement: we could skip writing records that were
2208 * not changed.
2209 */
2210 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2211 ret = reftable_addition_commit(add);
2212
2213 done:
2214 if (add)
2215 cleanup_fn(policy_cb_data);
2216 assert(ret != REFTABLE_API_ERROR);
2217
2218 reftable_ref_record_release(&ref_record);
2219 reftable_iterator_destroy(&it);
2220 reftable_addition_destroy(add);
2221 for (i = 0; i < logs_nr; i++)
2222 reftable_log_record_release(&logs[i]);
2223 free(logs);
2224 free(rewritten);
2225 return ret;
2226 }
2227
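/*
 * Virtual method table that plugs the reftable backend into the generic
 * ref-store API.
 */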
2228 struct ref_storage_be refs_be_reftable = {
2229 .name = "reftable",
2230 .init = reftable_be_init,
2231 .release = reftable_be_release,
2232 .create_on_disk = reftable_be_create_on_disk,
2233
2234 .transaction_prepare = reftable_be_transaction_prepare,
2235 .transaction_finish = reftable_be_transaction_finish,
2236 .transaction_abort = reftable_be_transaction_abort,
2237 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2238
2239 .pack_refs = reftable_be_pack_refs,
2240 .rename_ref = reftable_be_rename_ref,
2241 .copy_ref = reftable_be_copy_ref,
2242
2243 .iterator_begin = reftable_be_iterator_begin,
2244 .read_raw_ref = reftable_be_read_raw_ref,
2245 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2246
2247 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2248 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2249 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2250 .reflog_exists = reftable_be_reflog_exists,
2251 .create_reflog = reftable_be_create_reflog,
2252 .delete_reflog = reftable_be_delete_reflog,
2253 .reflog_expire = reftable_be_reflog_expire,
2254 };