refs/reftable-backend.c
1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../iterator.h"
9 #include "../ident.h"
10 #include "../lockfile.h"
11 #include "../object.h"
12 #include "../path.h"
13 #include "../refs.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
22
23 /*
24 * Used as a flag in ref_update::flags when the ref_update was via an
25 * update to HEAD.
26 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
28
29 struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51 };
52
53 /*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62 {
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76 }
77
78 /*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
90 */
91 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94 {
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when the worktree references the
118 * current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153 }
154
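/*
 * Decide whether a reflog entry should be written for the given ref.
 * This mirrors the files backend's handling of `core.logAllRefUpdates`:
 * the default depends on whether the repository is bare, "always" logs
 * every update, and the normal mode logs refs that either already have
 * a reflog or for which we auto-create reflogs (e.g. branches and HEAD).
 */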
155 static int should_write_log(struct ref_store *refs, const char *refname)
156 {
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172 }
173
174 static void clear_reftable_log_record(struct reftable_log_record *log)
175 {
176 switch (log->value_type) {
177 case REFTABLE_LOG_UPDATE:
178 /*
179 * When we write log records, the hashes are owned by the
180 * caller and thus shouldn't be free'd.
181 */
182 log->value.update.old_hash = NULL;
183 log->value.update.new_hash = NULL;
184 break;
185 case REFTABLE_LOG_DELETION:
186 break;
187 }
188 reftable_log_record_release(log);
189 }
190
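/*
 * Fill in the "update" part of a log record with the current committer
 * identity: name, email, timestamp and timezone offset are parsed from
 * `git_committer_info()`. The old/new hashes and the message are left
 * for the caller to set.
 */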
191 static void fill_reftable_log_record(struct reftable_log_record *log)
192 {
193 const char *info = git_committer_info(0);
194 struct ident_split split = {0};
195 int sign = 1;
196
197 if (split_ident_line(&split, info, strlen(info)))
198 BUG("failed splitting committer info");
199
200 reftable_log_record_release(log);
201 log->value_type = REFTABLE_LOG_UPDATE;
202 log->value.update.name =
203 xstrndup(split.name_begin, split.name_end - split.name_begin);
204 log->value.update.email =
205 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
206 log->value.update.time = atol(split.date_begin);
207 if (*split.tz_begin == '-') {
208 sign = -1;
209 split.tz_begin++;
210 }
211 if (*split.tz_begin == '+') {
212 sign = 1;
213 split.tz_begin++;
214 }
215
216 log->value.update.tz_offset = sign * atoi(split.tz_begin);
217 }
218
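/*
 * Read a reference from the given stack without reloading it first, so
 * callers that need an up-to-date view are expected to have reloaded the
 * stack themselves. Returns 0 on success, a positive value when the
 * reference does not exist, and a negative reftable error code on
 * failure. Symbolic refs are reported via `referent` and the
 * REF_ISSYMREF bit in `type`, direct refs via `oid`.
 */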
219 static int read_ref_without_reload(struct reftable_stack *stack,
220 const char *refname,
221 struct object_id *oid,
222 struct strbuf *referent,
223 unsigned int *type)
224 {
225 struct reftable_ref_record ref = {0};
226 int ret;
227
228 ret = reftable_stack_read_ref(stack, refname, &ref);
229 if (ret)
230 goto done;
231
232 if (ref.value_type == REFTABLE_REF_SYMREF) {
233 strbuf_reset(referent);
234 strbuf_addstr(referent, ref.value.symref);
235 *type |= REF_ISSYMREF;
236 } else if (reftable_ref_record_val1(&ref)) {
237 oidread(oid, reftable_ref_record_val1(&ref));
238 } else {
239 /* We got a tombstone, which should not happen. */
240 BUG("unhandled reference value type %d", ref.value_type);
241 }
242
243 done:
244 assert(ret != REFTABLE_API_ERROR);
245 reftable_ref_record_release(&ref);
246 return ret;
247 }
248
249 static struct ref_store *reftable_be_init(struct repository *repo,
250 const char *gitdir,
251 unsigned int store_flags)
252 {
253 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
254 struct strbuf path = STRBUF_INIT;
255 int is_worktree;
256 mode_t mask;
257
258 mask = umask(0);
259 umask(mask);
260
261 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
262 strmap_init(&refs->worktree_stacks);
263 refs->store_flags = store_flags;
264 refs->write_options.block_size = 4096;
265 refs->write_options.hash_id = repo->hash_algo->format_id;
266 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
267
268 /*
269 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
270 * This stack contains both the shared and the main worktree refs.
271 *
272 * Note that we don't try to resolve the path in case we have a
273 * worktree because `get_common_dir_noenv()` already does it for us.
274 */
275 is_worktree = get_common_dir_noenv(&path, gitdir);
276 if (!is_worktree) {
277 strbuf_reset(&path);
278 strbuf_realpath(&path, gitdir, 0);
279 }
280 strbuf_addstr(&path, "/reftable");
281 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285
286 /*
287 * If we're in a worktree we also need to set up the worktree reftable
288 * stack that is contained in the per-worktree GIT_DIR.
289 *
290 * Ideally, we would also add the stack to our worktree stack map. But
291 * we have no way to figure out the worktree name here and thus can't
292 * do it efficiently.
293 */
294 if (is_worktree) {
295 strbuf_reset(&path);
296 strbuf_addf(&path, "%s/reftable", gitdir);
297
298 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
299 refs->write_options);
300 if (refs->err)
301 goto done;
302 }
303
304 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
305
306 done:
307 assert(refs->err != REFTABLE_API_ERROR);
308 strbuf_release(&path);
309 return &refs->base;
310 }
311
312 static int reftable_be_init_db(struct ref_store *ref_store,
313 int flags UNUSED,
314 struct strbuf *err UNUSED)
315 {
316 struct reftable_ref_store *refs =
317 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
318 struct strbuf sb = STRBUF_INIT;
319
320 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
321 safe_create_dir(sb.buf, 1);
322 strbuf_reset(&sb);
323
324 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
325 write_file(sb.buf, "ref: refs/heads/.invalid");
326 adjust_shared_perm(sb.buf);
327 strbuf_reset(&sb);
328
329 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
330 safe_create_dir(sb.buf, 1);
331 strbuf_reset(&sb);
332
333 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
334 write_file(sb.buf, "this repository uses the reftable format");
335 adjust_shared_perm(sb.buf);
336
337 strbuf_release(&sb);
338 return 0;
339 }
340
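/*
 * Iterator over the references of a single reftable stack. It wraps a
 * reftable iterator seeked to `prefix` and translates each record into
 * the generic ref_iterator interface.
 */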
341 struct reftable_ref_iterator {
342 struct ref_iterator base;
343 struct reftable_ref_store *refs;
344 struct reftable_iterator iter;
345 struct reftable_ref_record ref;
346 struct object_id oid;
347
348 const char *prefix;
349 size_t prefix_len;
350 unsigned int flags;
351 int err;
352 };
353
354 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
355 {
356 struct reftable_ref_iterator *iter =
357 (struct reftable_ref_iterator *)ref_iterator;
358 struct reftable_ref_store *refs = iter->refs;
359
360 while (!iter->err) {
361 int flags = 0;
362
363 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
364 if (iter->err)
365 break;
366
367 /*
368 * The files backend only lists references contained in "refs/" unless
369 * the root refs are to be included. We emulate the same behaviour here.
370 */
371 if (!starts_with(iter->ref.refname, "refs/") &&
372 !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
373 (is_pseudoref(&iter->refs->base, iter->ref.refname) ||
374 is_headref(&iter->refs->base, iter->ref.refname)))) {
375 continue;
376 }
377
378 if (iter->prefix_len &&
379 strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
380 iter->err = 1;
381 break;
382 }
383
384 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
385 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
386 REF_WORKTREE_CURRENT)
387 continue;
388
389 switch (iter->ref.value_type) {
390 case REFTABLE_REF_VAL1:
391 oidread(&iter->oid, iter->ref.value.val1);
392 break;
393 case REFTABLE_REF_VAL2:
394 oidread(&iter->oid, iter->ref.value.val2.value);
395 break;
396 case REFTABLE_REF_SYMREF:
397 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
398 RESOLVE_REF_READING, &iter->oid, &flags))
399 oidclr(&iter->oid);
400 break;
401 default:
402 BUG("unhandled reference value type %d", iter->ref.value_type);
403 }
404
405 if (is_null_oid(&iter->oid))
406 flags |= REF_ISBROKEN;
407
408 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
409 if (!refname_is_safe(iter->ref.refname))
410 die(_("refname is dangerous: %s"), iter->ref.refname);
411 oidclr(&iter->oid);
412 flags |= REF_BAD_NAME | REF_ISBROKEN;
413 }
414
415 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
416 flags & REF_ISSYMREF &&
417 flags & REF_ISBROKEN)
418 continue;
419
420 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
421 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
422 &iter->oid, flags))
423 continue;
424
425 iter->base.refname = iter->ref.refname;
426 iter->base.oid = &iter->oid;
427 iter->base.flags = flags;
428
429 break;
430 }
431
432 if (iter->err > 0) {
433 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
434 return ITER_ERROR;
435 return ITER_DONE;
436 }
437
438 if (iter->err < 0) {
439 ref_iterator_abort(ref_iterator);
440 return ITER_ERROR;
441 }
442
443 return ITER_OK;
444 }
445
446 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
447 struct object_id *peeled)
448 {
449 struct reftable_ref_iterator *iter =
450 (struct reftable_ref_iterator *)ref_iterator;
451
452 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
453 oidread(peeled, iter->ref.value.val2.target_value);
454 return 0;
455 }
456
457 return -1;
458 }
459
460 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
461 {
462 struct reftable_ref_iterator *iter =
463 (struct reftable_ref_iterator *)ref_iterator;
464 reftable_ref_record_release(&iter->ref);
465 reftable_iterator_destroy(&iter->iter);
466 free(iter);
467 return ITER_DONE;
468 }
469
470 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
471 .advance = reftable_ref_iterator_advance,
472 .peel = reftable_ref_iterator_peel,
473 .abort = reftable_ref_iterator_abort
474 };
475
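/*
 * Set up a reference iterator for a single stack. The stack is reloaded
 * so that iteration sees the most recent tables. Errors are stored in
 * `iter->err` and surfaced when the iterator is first advanced.
 */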
476 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
477 struct reftable_stack *stack,
478 const char *prefix,
479 int flags)
480 {
481 struct reftable_merged_table *merged_table;
482 struct reftable_ref_iterator *iter;
483 int ret;
484
485 iter = xcalloc(1, sizeof(*iter));
486 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
487 iter->prefix = prefix;
488 iter->prefix_len = prefix ? strlen(prefix) : 0;
489 iter->base.oid = &iter->oid;
490 iter->flags = flags;
491 iter->refs = refs;
492
493 ret = refs->err;
494 if (ret)
495 goto done;
496
497 ret = reftable_stack_reload(stack);
498 if (ret)
499 goto done;
500
501 merged_table = reftable_stack_merged_table(stack);
502
503 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
504 if (ret)
505 goto done;
506
507 done:
508 iter->err = ret;
509 return iter;
510 }
511
512 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
513 const char *prefix,
514 const char **exclude_patterns,
515 unsigned int flags)
516 {
517 struct reftable_ref_iterator *main_iter, *worktree_iter;
518 struct reftable_ref_store *refs;
519 unsigned int required_flags = REF_STORE_READ;
520
521 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
522 required_flags |= REF_STORE_ODB;
523 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
524
525 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
526
527 /*
528 * The worktree stack is only set when we're in an actual worktree
529 * right now. If we aren't, then we return the common reftable
530 * iterator, only.
531 */
532 if (!refs->worktree_stack)
533 return &main_iter->base;
534
535 /*
536 * Otherwise we merge both the common and the per-worktree refs into a
537 * single iterator.
538 */
539 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
540 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
541 ref_iterator_select, NULL);
542 }
543
544 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
545 const char *refname,
546 struct object_id *oid,
547 struct strbuf *referent,
548 unsigned int *type,
549 int *failure_errno)
550 {
551 struct reftable_ref_store *refs =
552 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
553 struct reftable_stack *stack = stack_for(refs, refname, &refname);
554 int ret;
555
556 if (refs->err < 0)
557 return refs->err;
558
559 ret = reftable_stack_reload(stack);
560 if (ret)
561 return ret;
562
563 ret = read_ref_without_reload(stack, refname, oid, referent, type);
564 if (ret < 0)
565 return ret;
566 if (ret > 0) {
567 *failure_errno = ENOENT;
568 return -1;
569 }
570
571 return 0;
572 }
573
574 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
575 const char *refname,
576 struct strbuf *referent)
577 {
578 struct reftable_ref_store *refs =
579 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
580 struct reftable_stack *stack = stack_for(refs, refname, &refname);
581 struct reftable_ref_record ref = {0};
582 int ret;
583
584 ret = reftable_stack_reload(stack);
585 if (ret)
586 return ret;
587
588 ret = reftable_stack_read_ref(stack, refname, &ref);
589 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
590 strbuf_addstr(referent, ref.value.symref);
591 else
592 ret = -1;
593
594 reftable_ref_record_release(&ref);
595 return ret;
596 }
597
598 /*
599 * Return the refname under which update was originally requested.
600 */
601 static const char *original_update_refname(struct ref_update *update)
602 {
603 while (update->parent_update)
604 update = update->parent_update;
605 return update->refname;
606 }
607
608 struct reftable_transaction_update {
609 struct ref_update *update;
610 struct object_id current_oid;
611 };
612
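/*
 * Per-stack data of a transaction: the locked addition for the stack as
 * well as the updates queued against it. A single transaction may span
 * multiple stacks when it touches refs of several worktrees.
 */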
613 struct write_transaction_table_arg {
614 struct reftable_ref_store *refs;
615 struct reftable_stack *stack;
616 struct reftable_addition *addition;
617 struct reftable_transaction_update *updates;
618 size_t updates_nr;
619 size_t updates_alloc;
620 size_t updates_expected;
621 };
622
623 struct reftable_transaction_data {
624 struct write_transaction_table_arg *args;
625 size_t args_nr, args_alloc;
626 };
627
628 static void free_transaction_data(struct reftable_transaction_data *tx_data)
629 {
630 if (!tx_data)
631 return;
632 for (size_t i = 0; i < tx_data->args_nr; i++) {
633 reftable_addition_destroy(tx_data->args[i].addition);
634 free(tx_data->args[i].updates);
635 }
636 free(tx_data->args);
637 free(tx_data);
638 }
639
640 /*
641 * Prepare transaction update for the given reference update. This will cause
642 * us to lock the corresponding reftable stack for concurrent modification.
643 */
644 static int prepare_transaction_update(struct write_transaction_table_arg **out,
645 struct reftable_ref_store *refs,
646 struct reftable_transaction_data *tx_data,
647 struct ref_update *update,
648 struct strbuf *err)
649 {
650 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
651 struct write_transaction_table_arg *arg = NULL;
652 size_t i;
653 int ret;
654
655 /*
656 * Search for a preexisting stack update. If there is one then we add
657 * the update to it, otherwise we set up a new stack update.
658 */
659 for (i = 0; !arg && i < tx_data->args_nr; i++)
660 if (tx_data->args[i].stack == stack)
661 arg = &tx_data->args[i];
662
663 if (!arg) {
664 struct reftable_addition *addition;
665
666 ret = reftable_stack_reload(stack);
667 if (ret)
668 return ret;
669
670 ret = reftable_stack_new_addition(&addition, stack);
671 if (ret) {
672 if (ret == REFTABLE_LOCK_ERROR)
673 strbuf_addstr(err, "cannot lock references");
674 return ret;
675 }
676
677 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
678 tx_data->args_alloc);
679 arg = &tx_data->args[tx_data->args_nr++];
680 arg->refs = refs;
681 arg->stack = stack;
682 arg->addition = addition;
683 arg->updates = NULL;
684 arg->updates_nr = 0;
685 arg->updates_alloc = 0;
686 arg->updates_expected = 0;
687 }
688
689 arg->updates_expected++;
690
691 if (out)
692 *out = arg;
693
694 return 0;
695 }
696
697 /*
698 * Queue a reference update for the correct stack. We potentially need to
699 * handle multiple stack updates in a single transaction when it spans across
700 * multiple worktrees.
701 */
702 static int queue_transaction_update(struct reftable_ref_store *refs,
703 struct reftable_transaction_data *tx_data,
704 struct ref_update *update,
705 struct object_id *current_oid,
706 struct strbuf *err)
707 {
708 struct write_transaction_table_arg *arg = NULL;
709 int ret;
710
711 if (update->backend_data)
712 BUG("reference update queued more than once");
713
714 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
715 if (ret < 0)
716 return ret;
717
718 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
719 arg->updates_alloc);
720 arg->updates[arg->updates_nr].update = update;
721 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
722 update->backend_data = &arg->updates[arg->updates_nr++];
723
724 return 0;
725 }
726
727 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
728 struct ref_transaction *transaction,
729 struct strbuf *err)
730 {
731 struct reftable_ref_store *refs =
732 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
733 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
734 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
735 struct reftable_transaction_data *tx_data = NULL;
736 struct object_id head_oid;
737 unsigned int head_type = 0;
738 size_t i;
739 int ret;
740
741 ret = refs->err;
742 if (ret < 0)
743 goto done;
744
745 tx_data = xcalloc(1, sizeof(*tx_data));
746
747 /*
748 * Preprocess all updates. For one we check that there are no duplicate
749 * reference updates in this transaction. Second, we lock all stacks
750 * that will be modified during the transaction.
751 */
752 for (i = 0; i < transaction->nr; i++) {
753 ret = prepare_transaction_update(NULL, refs, tx_data,
754 transaction->updates[i], err);
755 if (ret)
756 goto done;
757
758 string_list_append(&affected_refnames,
759 transaction->updates[i]->refname);
760 }
761
762 /*
763 * Now that we have counted updates per stack we can preallocate their
764 * arrays. This avoids having to reallocate many times.
765 */
766 for (i = 0; i < tx_data->args_nr; i++) {
767 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
768 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
769 }
770
771 /*
772 * Fail if a refname appears more than once in the transaction.
773 * This code is taken from the files backend and is a good candidate to
774 * be moved into the generic layer.
775 */
776 string_list_sort(&affected_refnames);
777 if (ref_update_reject_duplicates(&affected_refnames, err)) {
778 ret = TRANSACTION_GENERIC_ERROR;
779 goto done;
780 }
781
782 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
783 &head_referent, &head_type);
784 if (ret < 0)
785 goto done;
786 ret = 0;
787
788 for (i = 0; i < transaction->nr; i++) {
789 struct ref_update *u = transaction->updates[i];
790 struct object_id current_oid = {0};
791 struct reftable_stack *stack;
792 const char *rewritten_ref;
793
794 stack = stack_for(refs, u->refname, &rewritten_ref);
795
796 /* Verify that the new object ID is valid. */
797 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
798 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
799 !(u->flags & REF_LOG_ONLY)) {
800 struct object *o = parse_object(refs->base.repo, &u->new_oid);
801 if (!o) {
802 strbuf_addf(err,
803 _("trying to write ref '%s' with nonexistent object %s"),
804 u->refname, oid_to_hex(&u->new_oid));
805 ret = -1;
806 goto done;
807 }
808
809 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
810 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
811 oid_to_hex(&u->new_oid), u->refname);
812 ret = -1;
813 goto done;
814 }
815 }
816
817 /*
818 * When we update the reference that HEAD points to we enqueue
819 * a second log-only update for HEAD so that its reflog is
820 * updated accordingly.
821 */
822 if (head_type == REF_ISSYMREF &&
823 !(u->flags & REF_LOG_ONLY) &&
824 !(u->flags & REF_UPDATE_VIA_HEAD) &&
825 !strcmp(rewritten_ref, head_referent.buf)) {
826 struct ref_update *new_update;
827
828 /*
829 * First make sure that HEAD is not already in the
830 * transaction. This check is O(lg N) in the transaction
831 * size, but it happens at most once per transaction.
832 */
833 if (string_list_has_string(&affected_refnames, "HEAD")) {
834 /* An entry already existed */
835 strbuf_addf(err,
836 _("multiple updates for 'HEAD' (including one "
837 "via its referent '%s') are not allowed"),
838 u->refname);
839 ret = TRANSACTION_NAME_CONFLICT;
840 goto done;
841 }
842
843 new_update = ref_transaction_add_update(
844 transaction, "HEAD",
845 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
846 &u->new_oid, &u->old_oid, u->msg);
847 string_list_insert(&affected_refnames, new_update->refname);
848 }
849
850 ret = read_ref_without_reload(stack, rewritten_ref,
851 &current_oid, &referent, &u->type);
852 if (ret < 0)
853 goto done;
854 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
855 /*
856 * The reference does not exist, and we either have no
857 * old object ID or expect the reference to not exist.
858 * We can thus skip below safety checks as well as the
859 * symref splitting. But we do want to verify that
860 * there is no conflicting reference here so that we
861 * can output a proper error message instead of failing
862 * at a later point.
863 */
864 ret = refs_verify_refname_available(ref_store, u->refname,
865 &affected_refnames, NULL, err);
866 if (ret < 0)
867 goto done;
868
869 /*
870 * There is no need to write the reference deletion
871 * when the reference in question doesn't exist.
872 */
873 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
874 ret = queue_transaction_update(refs, tx_data, u,
875 &current_oid, err);
876 if (ret)
877 goto done;
878 }
879
880 continue;
881 }
882 if (ret > 0) {
883 /* The reference does not exist, but we expected it to. */
884 strbuf_addf(err, _("cannot lock ref '%s': "
885 "unable to resolve reference '%s'"),
886 original_update_refname(u), u->refname);
887 ret = -1;
888 goto done;
889 }
890
891 if (u->type & REF_ISSYMREF) {
892 /*
893 * The reftable stack is locked at this point already,
894 * so it is safe to call `refs_resolve_ref_unsafe()`
895 * here without causing races.
896 */
897 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
898 &current_oid, NULL);
899
900 if (u->flags & REF_NO_DEREF) {
901 if (u->flags & REF_HAVE_OLD && !resolved) {
902 strbuf_addf(err, _("cannot lock ref '%s': "
903 "error reading reference"), u->refname);
904 ret = -1;
905 goto done;
906 }
907 } else {
908 struct ref_update *new_update;
909 int new_flags;
910
911 new_flags = u->flags;
912 if (!strcmp(rewritten_ref, "HEAD"))
913 new_flags |= REF_UPDATE_VIA_HEAD;
914
915 /*
916 * If we are updating a symref (eg. HEAD), we should also
917 * update the branch that the symref points to.
918 *
919 * This is generic functionality, and would be better
920 * done in refs.c, but the current implementation is
921 * intertwined with the locking in files-backend.c.
922 */
923 new_update = ref_transaction_add_update(
924 transaction, referent.buf, new_flags,
925 &u->new_oid, &u->old_oid, u->msg);
926 new_update->parent_update = u;
927
928 /*
929 * Change the symbolic ref update to log only. Also, it
930 * doesn't need to check its old OID value, as that will be
931 * done when new_update is processed.
932 */
933 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
934 u->flags &= ~REF_HAVE_OLD;
935
936 if (string_list_has_string(&affected_refnames, new_update->refname)) {
937 strbuf_addf(err,
938 _("multiple updates for '%s' (including one "
939 "via symref '%s') are not allowed"),
940 referent.buf, u->refname);
941 ret = TRANSACTION_NAME_CONFLICT;
942 goto done;
943 }
944 string_list_insert(&affected_refnames, new_update->refname);
945 }
946 }
947
948 /*
949 * Verify that the old object matches our expectations. Note
950 * that the error messages here do not make a lot of sense in
951 * the context of the reftable backend as we never lock
952 * individual refs. But the error messages match what the files
953 * backend returns, which keeps our tests happy.
954 */
955 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
956 if (is_null_oid(&u->old_oid))
957 strbuf_addf(err, _("cannot lock ref '%s': "
958 "reference already exists"),
959 original_update_refname(u));
960 else if (is_null_oid(&current_oid))
961 strbuf_addf(err, _("cannot lock ref '%s': "
962 "reference is missing but expected %s"),
963 original_update_refname(u),
964 oid_to_hex(&u->old_oid));
965 else
966 strbuf_addf(err, _("cannot lock ref '%s': "
967 "is at %s but expected %s"),
968 original_update_refname(u),
969 oid_to_hex(&current_oid),
970 oid_to_hex(&u->old_oid));
971 ret = -1;
972 goto done;
973 }
974
975 /*
976 * If all of the following conditions are true:
977 *
978 * - We're not about to write a symref.
979 * - We're not about to write a log-only entry.
980 * - Old and new object ID are different.
981 	 *   - Old and new object ID are the same.
982 * Then we're essentially doing a no-op update that can be
983 * skipped. This is not only for the sake of efficiency, but
984 * also skips writing unneeded reflog entries.
985 */
986 if ((u->type & REF_ISSYMREF) ||
987 (u->flags & REF_LOG_ONLY) ||
988 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
989 ret = queue_transaction_update(refs, tx_data, u,
990 &current_oid, err);
991 if (ret)
992 goto done;
993 }
994 }
995
996 transaction->backend_data = tx_data;
997 transaction->state = REF_TRANSACTION_PREPARED;
998
999 done:
1000 assert(ret != REFTABLE_API_ERROR);
1001 if (ret < 0) {
1002 free_transaction_data(tx_data);
1003 transaction->state = REF_TRANSACTION_CLOSED;
1004 if (!err->len)
1005 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1006 reftable_error_str(ret));
1007 }
1008 string_list_clear(&affected_refnames, 0);
1009 strbuf_release(&referent);
1010 strbuf_release(&head_referent);
1011
1012 return ret;
1013 }
1014
1015 static int reftable_be_transaction_abort(struct ref_store *ref_store,
1016 struct ref_transaction *transaction,
1017 struct strbuf *err)
1018 {
1019 struct reftable_transaction_data *tx_data = transaction->backend_data;
1020 free_transaction_data(tx_data);
1021 transaction->state = REF_TRANSACTION_CLOSED;
1022 return 0;
1023 }
1024
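/*
 * Sort queued updates by refname so that records are handed to the
 * reftable writer in ascending order, which is the order it expects.
 */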
1025 static int transaction_update_cmp(const void *a, const void *b)
1026 {
1027 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1028 ((struct reftable_transaction_update *)b)->update->refname);
1029 }
1030
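/*
 * Callback invoked via `reftable_addition_add()` that writes a new table
 * containing all queued ref updates for one stack together with the
 * corresponding reflog entries. Reflogs of deleted refs are written as
 * tombstones so that the old log entries go away with the ref.
 */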
1031 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1032 {
1033 struct write_transaction_table_arg *arg = cb_data;
1034 struct reftable_merged_table *mt =
1035 reftable_stack_merged_table(arg->stack);
1036 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1037 struct reftable_log_record *logs = NULL;
1038 size_t logs_nr = 0, logs_alloc = 0, i;
1039 int ret = 0;
1040
1041 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1042
1043 reftable_writer_set_limits(writer, ts, ts);
1044
1045 for (i = 0; i < arg->updates_nr; i++) {
1046 struct reftable_transaction_update *tx_update = &arg->updates[i];
1047 struct ref_update *u = tx_update->update;
1048
1049 /*
1050 * Write a reflog entry when updating a ref to point to
1051 * something new in either of the following cases:
1052 *
1053 * - The reference is about to be deleted. We always want to
1054 * delete the reflog in that case.
1055 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1056 * the reflog entry.
1057 * - `core.logAllRefUpdates` tells us to create the reflog for
1058 * the given ref.
1059 */
1060 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1061 struct reftable_log_record log = {0};
1062 struct reftable_iterator it = {0};
1063
1064 /*
1065 * When deleting refs we also delete all reflog entries
1066 * with them. While it is not strictly required to
1067 * delete reflogs together with their refs, this
1068 * matches the behaviour of the files backend.
1069 *
1070 * Unfortunately, we have no better way than to delete
1071 * all reflog entries one by one.
1072 */
1073 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1074 while (ret == 0) {
1075 struct reftable_log_record *tombstone;
1076
1077 ret = reftable_iterator_next_log(&it, &log);
1078 if (ret < 0)
1079 break;
1080 if (ret > 0 || strcmp(log.refname, u->refname)) {
1081 ret = 0;
1082 break;
1083 }
1084
1085 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1086 tombstone = &logs[logs_nr++];
1087 tombstone->refname = xstrdup(u->refname);
1088 tombstone->value_type = REFTABLE_LOG_DELETION;
1089 tombstone->update_index = log.update_index;
1090 }
1091
1092 reftable_log_record_release(&log);
1093 reftable_iterator_destroy(&it);
1094
1095 if (ret)
1096 goto done;
1097 } else if (u->flags & REF_HAVE_NEW &&
1098 (u->flags & REF_FORCE_CREATE_REFLOG ||
1099 should_write_log(&arg->refs->base, u->refname))) {
1100 struct reftable_log_record *log;
1101
1102 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1103 log = &logs[logs_nr++];
1104 memset(log, 0, sizeof(*log));
1105
1106 fill_reftable_log_record(log);
1107 log->update_index = ts;
1108 log->refname = xstrdup(u->refname);
1109 log->value.update.new_hash = u->new_oid.hash;
1110 log->value.update.old_hash = tx_update->current_oid.hash;
1111 log->value.update.message =
1112 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1113 }
1114
1115 if (u->flags & REF_LOG_ONLY)
1116 continue;
1117
1118 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1119 struct reftable_ref_record ref = {
1120 .refname = (char *)u->refname,
1121 .update_index = ts,
1122 .value_type = REFTABLE_REF_DELETION,
1123 };
1124
1125 ret = reftable_writer_add_ref(writer, &ref);
1126 if (ret < 0)
1127 goto done;
1128 } else if (u->flags & REF_HAVE_NEW) {
1129 struct reftable_ref_record ref = {0};
1130 struct object_id peeled;
1131 int peel_error;
1132
1133 ref.refname = (char *)u->refname;
1134 ref.update_index = ts;
1135
1136 peel_error = peel_object(&u->new_oid, &peeled);
1137 if (!peel_error) {
1138 ref.value_type = REFTABLE_REF_VAL2;
1139 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1140 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1141 } else if (!is_null_oid(&u->new_oid)) {
1142 ref.value_type = REFTABLE_REF_VAL1;
1143 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1144 }
1145
1146 ret = reftable_writer_add_ref(writer, &ref);
1147 if (ret < 0)
1148 goto done;
1149 }
1150 }
1151
1152 /*
1153 * Logs are written at the end so that we do not have intermixed ref
1154 * and log blocks.
1155 */
1156 if (logs) {
1157 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1158 if (ret < 0)
1159 goto done;
1160 }
1161
1162 done:
1163 assert(ret != REFTABLE_API_ERROR);
1164 for (i = 0; i < logs_nr; i++)
1165 clear_reftable_log_record(&logs[i]);
1166 free(logs);
1167 return ret;
1168 }
1169
1170 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1171 struct ref_transaction *transaction,
1172 struct strbuf *err)
1173 {
1174 struct reftable_transaction_data *tx_data = transaction->backend_data;
1175 int ret = 0;
1176
1177 for (size_t i = 0; i < tx_data->args_nr; i++) {
1178 ret = reftable_addition_add(tx_data->args[i].addition,
1179 write_transaction_table, &tx_data->args[i]);
1180 if (ret < 0)
1181 goto done;
1182
1183 ret = reftable_addition_commit(tx_data->args[i].addition);
1184 if (ret < 0)
1185 goto done;
1186 }
1187
1188 done:
1189 assert(ret != REFTABLE_API_ERROR);
1190 free_transaction_data(tx_data);
1191 transaction->state = REF_TRANSACTION_CLOSED;
1192
1193 if (ret) {
1194 strbuf_addf(err, _("reftable: transaction failure: %s"),
1195 reftable_error_str(ret));
1196 return -1;
1197 }
1198 return ret;
1199 }
1200
1201 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1202 struct ref_transaction *transaction,
1203 struct strbuf *err)
1204 {
1205 return ref_transaction_commit(transaction, err);
1206 }
1207
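/*
 * There is nothing to "pack" in the reftable format. Instead, we compact
 * the stack into a single table and then remove table files that are no
 * longer referenced.
 */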
1208 static int reftable_be_pack_refs(struct ref_store *ref_store,
1209 struct pack_refs_opts *opts)
1210 {
1211 struct reftable_ref_store *refs =
1212 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1213 struct reftable_stack *stack;
1214 int ret;
1215
1216 if (refs->err)
1217 return refs->err;
1218
1219 stack = refs->worktree_stack;
1220 if (!stack)
1221 stack = refs->main_stack;
1222
1223 ret = reftable_stack_compact_all(stack, NULL);
1224 if (ret)
1225 goto out;
1226 ret = reftable_stack_clean(stack);
1227 if (ret)
1228 goto out;
1229
1230 out:
1231 return ret;
1232 }
1233
1234 struct write_create_symref_arg {
1235 struct reftable_ref_store *refs;
1236 struct reftable_stack *stack;
1237 const char *refname;
1238 const char *target;
1239 const char *logmsg;
1240 };
1241
1242 static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1243 {
1244 struct write_create_symref_arg *create = cb_data;
1245 uint64_t ts = reftable_stack_next_update_index(create->stack);
1246 struct reftable_ref_record ref = {
1247 .refname = (char *)create->refname,
1248 .value_type = REFTABLE_REF_SYMREF,
1249 .value.symref = (char *)create->target,
1250 .update_index = ts,
1251 };
1252 struct reftable_log_record log = {0};
1253 struct object_id new_oid;
1254 struct object_id old_oid;
1255 int ret;
1256
1257 reftable_writer_set_limits(writer, ts, ts);
1258
1259 ret = reftable_writer_add_ref(writer, &ref);
1260 if (ret)
1261 return ret;
1262
1263 /*
1264 * Note that it is important to try and resolve the reference before we
1265 * write the log entry. This is because `should_write_log()` will munge
1266 * `core.logAllRefUpdates`, which is undesirable when we create a new
1267 * repository because it would be written into the config. As HEAD will
1268 * not resolve for new repositories this ordering will ensure that this
1269 * never happens.
1270 */
1271 if (!create->logmsg ||
1272 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1273 RESOLVE_REF_READING, &new_oid, NULL) ||
1274 !should_write_log(&create->refs->base, create->refname))
1275 return 0;
1276
1277 fill_reftable_log_record(&log);
1278 log.refname = xstrdup(create->refname);
1279 log.update_index = ts;
1280 log.value.update.message = xstrndup(create->logmsg,
1281 create->refs->write_options.block_size / 2);
1282 log.value.update.new_hash = new_oid.hash;
1283 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1284 RESOLVE_REF_READING, &old_oid, NULL))
1285 log.value.update.old_hash = old_oid.hash;
1286
1287 ret = reftable_writer_add_log(writer, &log);
1288 clear_reftable_log_record(&log);
1289 return ret;
1290 }
1291
1292 static int reftable_be_create_symref(struct ref_store *ref_store,
1293 const char *refname,
1294 const char *target,
1295 const char *logmsg)
1296 {
1297 struct reftable_ref_store *refs =
1298 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1299 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1300 struct write_create_symref_arg arg = {
1301 .refs = refs,
1302 .stack = stack,
1303 .refname = refname,
1304 .target = target,
1305 .logmsg = logmsg,
1306 };
1307 int ret;
1308
1309 ret = refs->err;
1310 if (ret < 0)
1311 goto done;
1312
1313 ret = reftable_stack_reload(stack);
1314 if (ret)
1315 goto done;
1316
1317 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1318
1319 done:
1320 assert(ret != REFTABLE_API_ERROR);
1321 if (ret)
1322 error("unable to write symref for %s: %s", refname,
1323 reftable_error_str(ret));
1324 return ret;
1325 }
1326
1327 struct write_copy_arg {
1328 struct reftable_ref_store *refs;
1329 struct reftable_stack *stack;
1330 const char *oldname;
1331 const char *newname;
1332 const char *logmsg;
1333 int delete_old;
1334 };
1335
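/*
 * Write a table that copies `oldname` to `newname`, including its reflog
 * entries. This callback is shared between copying and renaming refs:
 * when `delete_old` is set, the old ref and its log entries are deleted
 * in the same table.
 */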
1336 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1337 {
1338 struct write_copy_arg *arg = cb_data;
1339 uint64_t deletion_ts, creation_ts;
1340 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1341 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1342 struct reftable_log_record old_log = {0}, *logs = NULL;
1343 struct reftable_iterator it = {0};
1344 struct string_list skip = STRING_LIST_INIT_NODUP;
1345 struct strbuf errbuf = STRBUF_INIT;
1346 size_t logs_nr = 0, logs_alloc = 0, i;
1347 int ret;
1348
1349 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1350 ret = error(_("refname %s not found"), arg->oldname);
1351 goto done;
1352 }
1353 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1354 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1355 arg->oldname);
1356 goto done;
1357 }
1358
1359 /*
1360 * There's nothing to do in case the old and new name are the same, so
1361 * we exit early in that case.
1362 */
1363 if (!strcmp(arg->oldname, arg->newname)) {
1364 ret = 0;
1365 goto done;
1366 }
1367
1368 /*
1369 * Verify that the new refname is available.
1370 */
1371 string_list_insert(&skip, arg->oldname);
1372 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1373 NULL, &skip, &errbuf);
1374 if (ret < 0) {
1375 error("%s", errbuf.buf);
1376 goto done;
1377 }
1378
1379 /*
1380 * When deleting the old reference we have to use two update indices:
1381 * once to delete the old ref and its reflog, and once to create the
1382 * new ref and its reflog. They need to be staged with two separate
1383 * indices because the new reflog needs to encode both the deletion of
1384 * the old branch and the creation of the new branch, and we cannot do
1385 * two changes to a reflog in a single update.
1386 */
1387 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1388 if (arg->delete_old)
1389 creation_ts++;
1390 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1391
1392 /*
1393 * Add the new reference. If this is a rename then we also delete the
1394 * old reference.
1395 */
1396 refs[0] = old_ref;
1397 refs[0].refname = (char *)arg->newname;
1398 refs[0].update_index = creation_ts;
1399 if (arg->delete_old) {
1400 refs[1].refname = (char *)arg->oldname;
1401 refs[1].value_type = REFTABLE_REF_DELETION;
1402 refs[1].update_index = deletion_ts;
1403 }
1404 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1405 if (ret < 0)
1406 goto done;
1407
1408 /*
1409 * When deleting the old branch we need to create a reflog entry on the
1410 * new branch name that indicates that the old branch has been deleted
1411 * and then recreated. This is a tad weird, but matches what the files
1412 * backend does.
1413 */
1414 if (arg->delete_old) {
1415 struct strbuf head_referent = STRBUF_INIT;
1416 struct object_id head_oid;
1417 int append_head_reflog;
1418 unsigned head_type = 0;
1419
1420 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1421 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1422 fill_reftable_log_record(&logs[logs_nr]);
1423 logs[logs_nr].refname = (char *)arg->newname;
1424 logs[logs_nr].update_index = deletion_ts;
1425 logs[logs_nr].value.update.message =
1426 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1427 logs[logs_nr].value.update.old_hash = old_ref.value.val1;
1428 logs_nr++;
1429
1430 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1431 if (ret < 0)
1432 goto done;
1433 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1434 strbuf_release(&head_referent);
1435
1436 /*
1437 * The files backend uses `refs_delete_ref()` to delete the old
1438 * branch name, which will append a reflog entry for HEAD in
1439 * case it points to the old branch.
1440 */
1441 if (append_head_reflog) {
1442 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1443 logs[logs_nr] = logs[logs_nr - 1];
1444 logs[logs_nr].refname = "HEAD";
1445 logs_nr++;
1446 }
1447 }
1448
1449 /*
1450 * Create the reflog entry for the newly created branch.
1451 */
1452 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1453 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1454 fill_reftable_log_record(&logs[logs_nr]);
1455 logs[logs_nr].refname = (char *)arg->newname;
1456 logs[logs_nr].update_index = creation_ts;
1457 logs[logs_nr].value.update.message =
1458 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1459 logs[logs_nr].value.update.new_hash = old_ref.value.val1;
1460 logs_nr++;
1461
1462 /*
1463 * In addition to writing the reflog entry for the new branch, we also
1464 * copy over all log entries from the old reflog. Last but not least,
1465 * when renaming we also have to delete all the old reflog entries.
1466 */
1467 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1468 if (ret < 0)
1469 goto done;
1470
1471 while (1) {
1472 ret = reftable_iterator_next_log(&it, &old_log);
1473 if (ret < 0)
1474 goto done;
1475 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1476 ret = 0;
1477 break;
1478 }
1479
1480 free(old_log.refname);
1481
1482 /*
1483 * Copy over the old reflog entry with the new refname.
1484 */
1485 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1486 logs[logs_nr] = old_log;
1487 logs[logs_nr].refname = (char *)arg->newname;
1488 logs_nr++;
1489
1490 /*
1491 * Delete the old reflog entry in case we are renaming.
1492 */
1493 if (arg->delete_old) {
1494 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1495 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1496 logs[logs_nr].refname = (char *)arg->oldname;
1497 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1498 logs[logs_nr].update_index = old_log.update_index;
1499 logs_nr++;
1500 }
1501
1502 /*
1503 * Transfer ownership of the log record we're iterating over to
1504 * the array of log records. Otherwise, the pointers would get
1505 * free'd or reallocated by the iterator.
1506 */
1507 memset(&old_log, 0, sizeof(old_log));
1508 }
1509
1510 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1511 if (ret < 0)
1512 goto done;
1513
1514 done:
1515 assert(ret != REFTABLE_API_ERROR);
1516 reftable_iterator_destroy(&it);
1517 string_list_clear(&skip, 0);
1518 strbuf_release(&errbuf);
1519 for (i = 0; i < logs_nr; i++) {
1520 if (!strcmp(logs[i].refname, "HEAD"))
1521 continue;
1522 if (logs[i].value.update.old_hash == old_ref.value.val1)
1523 logs[i].value.update.old_hash = NULL;
1524 if (logs[i].value.update.new_hash == old_ref.value.val1)
1525 logs[i].value.update.new_hash = NULL;
1526 logs[i].refname = NULL;
1527 reftable_log_record_release(&logs[i]);
1528 }
1529 free(logs);
1530 reftable_ref_record_release(&old_ref);
1531 reftable_log_record_release(&old_log);
1532 return ret;
1533 }
1534
1535 static int reftable_be_rename_ref(struct ref_store *ref_store,
1536 const char *oldrefname,
1537 const char *newrefname,
1538 const char *logmsg)
1539 {
1540 struct reftable_ref_store *refs =
1541 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1542 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1543 struct write_copy_arg arg = {
1544 .refs = refs,
1545 .stack = stack,
1546 .oldname = oldrefname,
1547 .newname = newrefname,
1548 .logmsg = logmsg,
1549 .delete_old = 1,
1550 };
1551 int ret;
1552
1553 ret = refs->err;
1554 if (ret < 0)
1555 goto done;
1556
1557 ret = reftable_stack_reload(stack);
1558 if (ret)
1559 goto done;
1560 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1561
1562 done:
1563 assert(ret != REFTABLE_API_ERROR);
1564 return ret;
1565 }
1566
1567 static int reftable_be_copy_ref(struct ref_store *ref_store,
1568 const char *oldrefname,
1569 const char *newrefname,
1570 const char *logmsg)
1571 {
1572 struct reftable_ref_store *refs =
1573 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1574 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1575 struct write_copy_arg arg = {
1576 .refs = refs,
1577 .stack = stack,
1578 .oldname = oldrefname,
1579 .newname = newrefname,
1580 .logmsg = logmsg,
1581 };
1582 int ret;
1583
1584 ret = refs->err;
1585 if (ret < 0)
1586 goto done;
1587
1588 ret = reftable_stack_reload(stack);
1589 if (ret)
1590 goto done;
1591 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1592
1593 done:
1594 assert(ret != REFTABLE_API_ERROR);
1595 return ret;
1596 }
1597
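/*
 * Iterator that yields the name of each ref which has at least one
 * reflog entry in the given stack. Log records are deduplicated by
 * refname via `last_name`.
 */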
1598 struct reftable_reflog_iterator {
1599 struct ref_iterator base;
1600 struct reftable_ref_store *refs;
1601 struct reftable_iterator iter;
1602 struct reftable_log_record log;
1603 char *last_name;
1604 int err;
1605 };
1606
1607 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1608 {
1609 struct reftable_reflog_iterator *iter =
1610 (struct reftable_reflog_iterator *)ref_iterator;
1611
1612 while (!iter->err) {
1613 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1614 if (iter->err)
1615 break;
1616
1617 /*
1618 * We want the refnames that we have reflogs for, so we skip if
1619 * we've already produced this name. This could be faster by
1620 * seeking directly to reflog@update_index==0.
1621 */
1622 if (iter->last_name && !strcmp(iter->log.refname, iter->last_name))
1623 continue;
1624
1625 if (check_refname_format(iter->log.refname,
1626 REFNAME_ALLOW_ONELEVEL))
1627 continue;
1628
1629 free(iter->last_name);
1630 iter->last_name = xstrdup(iter->log.refname);
1631 iter->base.refname = iter->log.refname;
1632
1633 break;
1634 }
1635
1636 if (iter->err > 0) {
1637 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1638 return ITER_ERROR;
1639 return ITER_DONE;
1640 }
1641
1642 if (iter->err < 0) {
1643 ref_iterator_abort(ref_iterator);
1644 return ITER_ERROR;
1645 }
1646
1647 return ITER_OK;
1648 }
1649
1650 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1651 struct object_id *peeled)
1652 {
1653 BUG("reftable reflog iterator cannot be peeled");
1654 return -1;
1655 }
1656
1657 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1658 {
1659 struct reftable_reflog_iterator *iter =
1660 (struct reftable_reflog_iterator *)ref_iterator;
1661 reftable_log_record_release(&iter->log);
1662 reftable_iterator_destroy(&iter->iter);
1663 free(iter->last_name);
1664 free(iter);
1665 return ITER_DONE;
1666 }
1667
1668 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1669 .advance = reftable_reflog_iterator_advance,
1670 .peel = reftable_reflog_iterator_peel,
1671 .abort = reftable_reflog_iterator_abort
1672 };
1673
1674 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1675 struct reftable_stack *stack)
1676 {
1677 struct reftable_merged_table *merged_table;
1678 struct reftable_reflog_iterator *iter;
1679 int ret;
1680
1681 iter = xcalloc(1, sizeof(*iter));
1682 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
1683 iter->refs = refs;
1684
1685 ret = refs->err;
1686 if (ret)
1687 goto done;
1688
1689 	ret = reftable_stack_reload(stack);
1690 if (ret < 0)
1691 goto done;
1692
1693 merged_table = reftable_stack_merged_table(stack);
1694
1695 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1696 if (ret < 0)
1697 goto done;
1698
1699 done:
1700 iter->err = ret;
1701 return iter;
1702 }
1703
1704 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1705 {
1706 struct reftable_ref_store *refs =
1707 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1708 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1709
1710 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1711 if (!refs->worktree_stack)
1712 return &main_iter->base;
1713
1714 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1715
1716 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
1717 ref_iterator_select, NULL);
1718 }
1719
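/*
 * Translate a reftable log record into the arguments expected by
 * `each_reflog_ent_fn`. Reflog existence markers, which have both the
 * old and the new object ID set to the null object ID, are skipped.
 */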
1720 static int yield_log_record(struct reftable_log_record *log,
1721 each_reflog_ent_fn fn,
1722 void *cb_data)
1723 {
1724 struct object_id old_oid, new_oid;
1725 const char *full_committer;
1726
1727 oidread(&old_oid, log->value.update.old_hash);
1728 oidread(&new_oid, log->value.update.new_hash);
1729
1730 /*
1731 * When both the old object ID and the new object ID are null
1732 * then this is the reflog existence marker. The caller must
1733 * not be aware of it.
1734 */
1735 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1736 return 0;
1737
1738 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1739 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1740 return fn(&old_oid, &new_oid, full_committer,
1741 log->value.update.time, log->value.update.tz_offset,
1742 log->value.update.message, cb_data);
1743 }
1744
1745 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1746 const char *refname,
1747 each_reflog_ent_fn fn,
1748 void *cb_data)
1749 {
1750 struct reftable_ref_store *refs =
1751 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1752 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1753 struct reftable_merged_table *mt = NULL;
1754 struct reftable_log_record log = {0};
1755 struct reftable_iterator it = {0};
1756 int ret;
1757
1758 if (refs->err < 0)
1759 return refs->err;
1760
1761 mt = reftable_stack_merged_table(stack);
1762 ret = reftable_merged_table_seek_log(mt, &it, refname);
1763 while (!ret) {
1764 ret = reftable_iterator_next_log(&it, &log);
1765 if (ret < 0)
1766 break;
1767 if (ret > 0 || strcmp(log.refname, refname)) {
1768 ret = 0;
1769 break;
1770 }
1771
1772 ret = yield_log_record(&log, fn, cb_data);
1773 if (ret)
1774 break;
1775 }
1776
1777 reftable_log_record_release(&log);
1778 reftable_iterator_destroy(&it);
1779 return ret;
1780 }
1781
1782 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1783 const char *refname,
1784 each_reflog_ent_fn fn,
1785 void *cb_data)
1786 {
1787 struct reftable_ref_store *refs =
1788 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1789 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1790 struct reftable_merged_table *mt = NULL;
1791 struct reftable_log_record *logs = NULL;
1792 struct reftable_iterator it = {0};
1793 size_t logs_alloc = 0, logs_nr = 0, i;
1794 int ret;
1795
1796 if (refs->err < 0)
1797 return refs->err;
1798
1799 mt = reftable_stack_merged_table(stack);
1800 ret = reftable_merged_table_seek_log(mt, &it, refname);
1801 while (!ret) {
1802 struct reftable_log_record log = {0};
1803
1804 ret = reftable_iterator_next_log(&it, &log);
1805 if (ret < 0)
1806 goto done;
1807 if (ret > 0 || strcmp(log.refname, refname)) {
1808 reftable_log_record_release(&log);
1809 ret = 0;
1810 break;
1811 }
1812
1813 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1814 logs[logs_nr++] = log;
1815 }
1816
1817 for (i = logs_nr; i--;) {
1818 ret = yield_log_record(&logs[i], fn, cb_data);
1819 if (ret)
1820 goto done;
1821 }
1822
1823 done:
1824 reftable_iterator_destroy(&it);
1825 for (i = 0; i < logs_nr; i++)
1826 reftable_log_record_release(&logs[i]);
1827 free(logs);
1828 return ret;
1829 }
1830
1831 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1832 const char *refname)
1833 {
1834 struct reftable_ref_store *refs =
1835 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1836 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1837 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
1838 struct reftable_log_record log = {0};
1839 struct reftable_iterator it = {0};
1840 int ret;
1841
1842 ret = refs->err;
1843 if (ret < 0)
1844 goto done;
1845
1846 ret = reftable_stack_reload(stack);
1847 if (ret < 0)
1848 goto done;
1849
1850 ret = reftable_merged_table_seek_log(mt, &it, refname);
1851 if (ret < 0)
1852 goto done;
1853
1854 /*
1855 * Check whether we get at least one log record for the given ref name.
1856 * If so, the reflog exists, otherwise it doesn't.
1857 */
1858 ret = reftable_iterator_next_log(&it, &log);
1859 if (ret < 0)
1860 goto done;
1861 if (ret > 0) {
1862 ret = 0;
1863 goto done;
1864 }
1865
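/*
 * The seek positions the iterator at the first record at or after the
 * requested ref name, so the record we read may belong to a different
 * ref entirely. Only an exact match counts as an existing reflog.
 */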
1866 ret = strcmp(log.refname, refname) == 0;
1867
1868 done:
1869 reftable_iterator_destroy(&it);
1870 reftable_log_record_release(&log);
1871 if (ret < 0)
1872 ret = 0;
1873 return ret;
1874 }
1875
1876 struct write_reflog_existence_arg {
1877 struct reftable_ref_store *refs;
1878 const char *refname;
1879 struct reftable_stack *stack;
1880 };
1881
1882 static int write_reflog_existence_table(struct reftable_writer *writer,
1883 void *cb_data)
1884 {
1885 struct write_reflog_existence_arg *arg = cb_data;
1886 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1887 struct reftable_log_record log = {0};
1888 int ret;
1889
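/*
 * reftable_stack_read_log() returns 0 if the ref already has a log
 * record, in which case the reflog exists and there is nothing to do.
 * Only when no record was found (a positive return value) do we go on
 * to write the existence marker.
 */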
1890 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1891 if (ret <= 0)
1892 goto done;
1893
1894 reftable_writer_set_limits(writer, ts, ts);
1895
1896 /*
1897 * The existence entry has both the old and the new object ID set to
1898 * the null object ID. Our iterators are aware of this marker and will
1899 * not present it to their callers.
1900 */
1901 log.refname = xstrdup(arg->refname);
1902 log.update_index = ts;
1903 log.value_type = REFTABLE_LOG_UPDATE;
1904 ret = reftable_writer_add_log(writer, &log);
1905
1906 done:
1907 assert(ret != REFTABLE_API_ERROR);
1908 reftable_log_record_release(&log);
1909 return ret;
1910 }
1911
1912 static int reftable_be_create_reflog(struct ref_store *ref_store,
1913 const char *refname,
1914 struct strbuf *errmsg)
1915 {
1916 struct reftable_ref_store *refs =
1917 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1918 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1919 struct write_reflog_existence_arg arg = {
1920 .refs = refs,
1921 .stack = stack,
1922 .refname = refname,
1923 };
1924 int ret;
1925
1926 ret = refs->err;
1927 if (ret < 0)
1928 goto done;
1929
1930 ret = reftable_stack_reload(stack);
1931 if (ret)
1932 goto done;
1933
1934 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1935
1936 done:
1937 return ret;
1938 }
1939
1940 struct write_reflog_delete_arg {
1941 struct reftable_stack *stack;
1942 const char *refname;
1943 };
1944
1945 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1946 {
1947 struct write_reflog_delete_arg *arg = cb_data;
1948 struct reftable_merged_table *mt =
1949 reftable_stack_merged_table(arg->stack);
1950 struct reftable_log_record log = {0}, tombstone = {0};
1951 struct reftable_iterator it = {0};
1952 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1953 int ret;
1954
1955 reftable_writer_set_limits(writer, ts, ts);
1956
1957 /*
1958 * In order to delete a reflog we need to delete all of its entries one
1959 * by one. This is inefficient, but the reftable format does not have a
1960 * better marker right now.
1961 */
1962 ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
1963 while (ret == 0) {
1964 ret = reftable_iterator_next_log(&it, &log);
1965 if (ret < 0)
1966 break;
1967 if (ret > 0 || strcmp(log.refname, arg->refname)) {
1968 ret = 0;
1969 break;
1970 }
1971
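/*
 * Write a deletion record that carries the same refname and update
 * index as the entry being removed. Once the new table becomes part of
 * the stack it shadows the corresponding live record.
 */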
1972 tombstone.refname = (char *)arg->refname;
1973 tombstone.value_type = REFTABLE_LOG_DELETION;
1974 tombstone.update_index = log.update_index;
1975
1976 ret = reftable_writer_add_log(writer, &tombstone);
1977 }
1978
1979 reftable_log_record_release(&log);
1980 reftable_iterator_destroy(&it);
1981 return ret;
1982 }
1983
1984 static int reftable_be_delete_reflog(struct ref_store *ref_store,
1985 const char *refname)
1986 {
1987 struct reftable_ref_store *refs =
1988 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1989 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1990 struct write_reflog_delete_arg arg = {
1991 .stack = stack,
1992 .refname = refname,
1993 };
1994 int ret;
1995
1996 ret = reftable_stack_reload(stack);
1997 if (ret)
1998 return ret;
1999 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
2000
2001 assert(ret != REFTABLE_API_ERROR);
2002 return ret;
2003 }
2004
2005 struct reflog_expiry_arg {
2006 struct reftable_stack *stack;
2007 struct reftable_log_record *records;
2008 struct object_id update_oid;
2009 const char *refname;
2010 size_t len;
2011 };
2012
2013 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2014 {
2015 struct reflog_expiry_arg *arg = cb_data;
2016 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2017 uint64_t live_records = 0;
2018 size_t i;
2019 int ret;
2020
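/*
 * Count the entries that survived pruning. If none did, we have to
 * write the reflog existence marker below so that the now-empty reflog
 * is still distinguishable from a deleted one.
 */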
2021 for (i = 0; i < arg->len; i++)
2022 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2023 live_records++;
2024
2025 reftable_writer_set_limits(writer, ts, ts);
2026
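/*
 * A non-null update_oid means the caller asked for the ref itself to
 * be rewritten to the new value of the last surviving entry, so emit
 * an updated ref record alongside the log records.
 */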
2027 if (!is_null_oid(&arg->update_oid)) {
2028 struct reftable_ref_record ref = {0};
2029 struct object_id peeled;
2030
2031 ref.refname = (char *)arg->refname;
2032 ref.update_index = ts;
2033
2034 if (!peel_object(&arg->update_oid, &peeled)) {
2035 ref.value_type = REFTABLE_REF_VAL2;
2036 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2037 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2038 } else {
2039 ref.value_type = REFTABLE_REF_VAL1;
2040 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2041 }
2042
2043 ret = reftable_writer_add_ref(writer, &ref);
2044 if (ret < 0)
2045 return ret;
2046 }
2047
2048 /*
2049 * When there are no more entries left in the reflog we empty it
2050 * completely, but write a placeholder reflog entry that indicates that
2051 * the reflog still exists.
2052 */
2053 if (!live_records) {
2054 struct reftable_log_record log = {
2055 .refname = (char *)arg->refname,
2056 .value_type = REFTABLE_LOG_UPDATE,
2057 .update_index = ts,
2058 };
2059
2060 ret = reftable_writer_add_log(writer, &log);
2061 if (ret)
2062 return ret;
2063 }
2064
2065 for (i = 0; i < arg->len; i++) {
2066 ret = reftable_writer_add_log(writer, &arg->records[i]);
2067 if (ret)
2068 return ret;
2069 }
2070
2071 return 0;
2072 }
2073
2074 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2075 const char *refname,
2076 unsigned int flags,
2077 reflog_expiry_prepare_fn prepare_fn,
2078 reflog_expiry_should_prune_fn should_prune_fn,
2079 reflog_expiry_cleanup_fn cleanup_fn,
2080 void *policy_cb_data)
2081 {
2082 /*
2083 * For log expiry, we write tombstones for every single reflog entry
2084 * that is to be expired. This means that the entries are still
2085 * retrievable by delving into the stack, and expiring entries
2086 * paradoxically takes extra memory. This memory is only reclaimed when
2087 * compacting the reftable stack.
2088 *
2089 * It would be better if the refs backend supported an API that sets a
2090 * criterion for all refs, passing the criterion to pack_refs().
2091 *
2092 * On the plus side, because we do the expiration per ref, we can easily
2093 * insert the reflog existence dummies.
2094 */
2095 struct reftable_ref_store *refs =
2096 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2097 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2098 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
2099 struct reftable_log_record *logs = NULL;
2100 struct reftable_log_record *rewritten = NULL;
2101 struct reftable_ref_record ref_record = {0};
2102 struct reftable_iterator it = {0};
2103 struct reftable_addition *add = NULL;
2104 struct reflog_expiry_arg arg = {0};
2105 struct object_id oid = {0};
2106 uint8_t *last_hash = NULL;
2107 size_t logs_nr = 0, logs_alloc = 0, i;
2108 int ret;
2109
2110 if (refs->err < 0)
2111 return refs->err;
2112
2113 ret = reftable_stack_reload(stack);
2114 if (ret < 0)
2115 goto done;
2116
2117 ret = reftable_merged_table_seek_log(mt, &it, refname);
2118 if (ret < 0)
2119 goto done;
2120
2121 ret = reftable_stack_new_addition(&add, stack);
2122 if (ret < 0)
2123 goto done;
2124
2125 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2126 if (ret < 0)
2127 goto done;
2128 if (reftable_ref_record_val1(&ref_record))
2129 oidread(&oid, reftable_ref_record_val1(&ref_record));
2130 prepare_fn(refname, &oid, policy_cb_data);
2131
2132 while (1) {
2133 struct reftable_log_record log = {0};
2134 struct object_id old_oid, new_oid;
2135
2136 ret = reftable_iterator_next_log(&it, &log);
2137 if (ret < 0)
2138 goto done;
2139 if (ret > 0 || strcmp(log.refname, refname)) {
2140 reftable_log_record_release(&log);
2141 break;
2142 }
2143
2144 oidread(&old_oid, log.value.update.old_hash);
2145 oidread(&new_oid, log.value.update.new_hash);
2146
2147 /*
2148 * Skip over the reflog existence marker. We will add it back
2149 * in when there are no live reflog records.
2150 */
2151 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2152 reftable_log_record_release(&log);
2153 continue;
2154 }
2155
2156 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2157 logs[logs_nr++] = log;
2158 }
2159
2160 /*
2161 * We need to rewrite all reflog entries according to the pruning
2162 * callback function:
2163 *
2164 * - If a reflog entry shall be pruned we mark the record for
2165 * deletion.
2166 *
2167 * - Otherwise we may have to rewrite the chain of reflog entries so
2168 * that gaps created by just-deleted records get backfilled.
2169 */
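/*
 * The records were collected newest-first above, so walking the array
 * backwards visits the reflog from oldest to newest. This lets
 * `last_hash` carry forward the new object ID of the preceding
 * surviving entry, which is used to backfill the old object ID of the
 * next surviving entry when EXPIRE_REFLOGS_REWRITE is set.
 */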
2170 CALLOC_ARRAY(rewritten, logs_nr);
2171 for (i = logs_nr; i--;) {
2172 struct reftable_log_record *dest = &rewritten[i];
2173 struct object_id old_oid, new_oid;
2174
2175 *dest = logs[i];
2176 oidread(&old_oid, logs[i].value.update.old_hash);
2177 oidread(&new_oid, logs[i].value.update.new_hash);
2178
2179 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2180 (timestamp_t)logs[i].value.update.time,
2181 logs[i].value.update.tz_offset,
2182 logs[i].value.update.message,
2183 policy_cb_data)) {
2184 dest->value_type = REFTABLE_LOG_DELETION;
2185 } else {
2186 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2187 dest->value.update.old_hash = last_hash;
2188 last_hash = logs[i].value.update.new_hash;
2189 }
2190 }
2191
2192 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2193 reftable_ref_record_val1(&ref_record))
2194 oidread(&arg.update_oid, last_hash);
2195
2196 arg.records = rewritten;
2197 arg.len = logs_nr;
2198 arg.stack = stack;
2199 arg.refname = refname;
2200
2201 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2202 if (ret < 0)
2203 goto done;
2204
2205 /*
2206 * Future improvement: we could skip writing records that were
2207 * not changed.
2208 */
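/*
 * With EXPIRE_REFLOGS_DRY_RUN the prepared addition is never
 * committed; it is simply destroyed below, leaving the stack
 * untouched.
 */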
2209 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2210 ret = reftable_addition_commit(add);
2211
2212 done:
2213 if (add)
2214 cleanup_fn(policy_cb_data);
2215 assert(ret != REFTABLE_API_ERROR);
2216
2217 reftable_ref_record_release(&ref_record);
2218 reftable_iterator_destroy(&it);
2219 reftable_addition_destroy(add);
2220 for (i = 0; i < logs_nr; i++)
2221 reftable_log_record_release(&logs[i]);
2222 free(logs);
2223 free(rewritten);
2224 return ret;
2225 }
2226
2227 struct ref_storage_be refs_be_reftable = {
2228 .name = "reftable",
2229 .init = reftable_be_init,
2230 .init_db = reftable_be_init_db,
2231 .transaction_prepare = reftable_be_transaction_prepare,
2232 .transaction_finish = reftable_be_transaction_finish,
2233 .transaction_abort = reftable_be_transaction_abort,
2234 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2235
2236 .pack_refs = reftable_be_pack_refs,
2237 .create_symref = reftable_be_create_symref,
2238 .rename_ref = reftable_be_rename_ref,
2239 .copy_ref = reftable_be_copy_ref,
2240
2241 .iterator_begin = reftable_be_iterator_begin,
2242 .read_raw_ref = reftable_be_read_raw_ref,
2243 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2244
2245 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2246 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2247 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2248 .reflog_exists = reftable_be_reflog_exists,
2249 .create_reflog = reftable_be_create_reflog,
2250 .delete_reflog = reftable_be_delete_reflog,
2251 .reflog_expire = reftable_be_reflog_expire,
2252 };