refs/reftable-backend.c
1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../iterator.h"
9 #include "../ident.h"
10 #include "../lockfile.h"
11 #include "../object.h"
12 #include "../path.h"
13 #include "../refs.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
22
23 /*
24 * Used as a flag in ref_update::flags when the ref_update was via an
25 * update to HEAD.
26 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
28
29 struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51 };
52
53 /*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62 {
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftable\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76 }
77
78 /*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
90 */
91 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94 {
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when a `worktrees/$worktree` ref refers
118 * to the current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153 }
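/*
 * Illustrative sketch, not part of the sources: assuming the store was
 * opened via a linked worktree, `stack_for()` routes lookups roughly as
 * follows (the ref names are made up):
 *
 *   stack_for(store, "refs/heads/main", &name)
 *       -> main_stack,                    name = "refs/heads/main"
 *   stack_for(store, "HEAD", &name)
 *       -> worktree_stack,                name = "HEAD"
 *   stack_for(store, "worktrees/other/refs/bisect/bad", &name)
 *       -> stack for worktree "other",    name = "refs/bisect/bad"
 */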
154
155 static int should_write_log(struct ref_store *refs, const char *refname)
156 {
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172 }
173
174 static void clear_reftable_log_record(struct reftable_log_record *log)
175 {
176 switch (log->value_type) {
177 case REFTABLE_LOG_UPDATE:
178 /*
179 * When we write log records, the hashes are owned by the
180 * caller and thus shouldn't be free'd.
181 */
182 log->value.update.old_hash = NULL;
183 log->value.update.new_hash = NULL;
184 break;
185 case REFTABLE_LOG_DELETION:
186 break;
187 }
188 reftable_log_record_release(log);
189 }
190
191 static void fill_reftable_log_record(struct reftable_log_record *log)
192 {
193 const char *info = git_committer_info(0);
194 struct ident_split split = {0};
195 int sign = 1;
196
197 if (split_ident_line(&split, info, strlen(info)))
198 BUG("failed splitting committer info");
199
200 reftable_log_record_release(log);
201 log->value_type = REFTABLE_LOG_UPDATE;
202 log->value.update.name =
203 xstrndup(split.name_begin, split.name_end - split.name_begin);
204 log->value.update.email =
205 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
206 log->value.update.time = atol(split.date_begin);
207 if (*split.tz_begin == '-') {
208 sign = -1;
209 split.tz_begin++;
210 }
211 if (*split.tz_begin == '+') {
212 sign = 1;
213 split.tz_begin++;
214 }
215
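/*
 * The offset keeps the ident's "+HHMM"/"-HHMM" digits as a plain
 * integer, so a committer timezone of "+0530" is stored as 530.
 */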
216 log->value.update.tz_offset = sign * atoi(split.tz_begin);
217 }
218
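/*
 * Read a single reference from the given stack without reloading it
 * first. Returns 0 when the reference exists, a positive value when it
 * does not, and a negative reftable error code on failure.
 */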
219 static int read_ref_without_reload(struct reftable_stack *stack,
220 const char *refname,
221 struct object_id *oid,
222 struct strbuf *referent,
223 unsigned int *type)
224 {
225 struct reftable_ref_record ref = {0};
226 int ret;
227
228 ret = reftable_stack_read_ref(stack, refname, &ref);
229 if (ret)
230 goto done;
231
232 if (ref.value_type == REFTABLE_REF_SYMREF) {
233 strbuf_reset(referent);
234 strbuf_addstr(referent, ref.value.symref);
235 *type |= REF_ISSYMREF;
236 } else if (reftable_ref_record_val1(&ref)) {
237 oidread(oid, reftable_ref_record_val1(&ref));
238 } else {
239 /* We got a tombstone, which should not happen. */
240 BUG("unhandled reference value type %d", ref.value_type);
241 }
242
243 done:
244 assert(ret != REFTABLE_API_ERROR);
245 reftable_ref_record_release(&ref);
246 return ret;
247 }
248
249 static struct ref_store *reftable_be_init(struct repository *repo,
250 const char *gitdir,
251 unsigned int store_flags)
252 {
253 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
254 struct strbuf path = STRBUF_INIT;
255 int is_worktree;
256 mode_t mask;
257
258 mask = umask(0);
259 umask(mask);
260
261 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
262 strmap_init(&refs->worktree_stacks);
263 refs->store_flags = store_flags;
264 refs->write_options.block_size = 4096;
265 refs->write_options.hash_id = repo->hash_algo->format_id;
266 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
267
268 /*
269 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
270 * This stack contains both the shared and the main worktree refs.
271 *
272 * Note that we don't try to resolve the path in case we have a
273 * worktree because `get_common_dir_noenv()` already does it for us.
274 */
275 is_worktree = get_common_dir_noenv(&path, gitdir);
276 if (!is_worktree) {
277 strbuf_reset(&path);
278 strbuf_realpath(&path, gitdir, 0);
279 }
280 strbuf_addstr(&path, "/reftable");
281 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285
286 /*
287 * If we're in a worktree we also need to set up the worktree reftable
288 * stack that is contained in the per-worktree GIT_DIR.
289 *
290 * Ideally, we would also add the stack to our worktree stack map. But
291 * we have no way to figure out the worktree name here and thus can't
292 * do it efficiently.
293 */
294 if (is_worktree) {
295 strbuf_reset(&path);
296 strbuf_addf(&path, "%s/reftable", gitdir);
297
298 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
299 refs->write_options);
300 if (refs->err)
301 goto done;
302 }
303
304 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
305
306 done:
307 assert(refs->err != REFTABLE_API_ERROR);
308 strbuf_release(&path);
309 return &refs->base;
310 }
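/*
 * Illustrative sketch of the resulting stack locations, assuming a
 * hypothetical linked worktree named "wt":
 *
 *   $GIT_COMMON_DIR/reftable              <- main_stack
 *   $GIT_COMMON_DIR/worktrees/wt/reftable <- lazily set up worktree stacks
 *   $GIT_DIR/reftable                     <- worktree_stack (only when the
 *                                            refdb was opened via "wt")
 */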
311
312 static int reftable_be_init_db(struct ref_store *ref_store,
313 int flags UNUSED,
314 struct strbuf *err UNUSED)
315 {
316 struct reftable_ref_store *refs =
317 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
318 struct strbuf sb = STRBUF_INIT;
319
320 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
321 safe_create_dir(sb.buf, 1);
322 strbuf_reset(&sb);
323
324 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
325 write_file(sb.buf, "ref: refs/heads/.invalid");
326 adjust_shared_perm(sb.buf);
327 strbuf_reset(&sb);
328
329 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
330 safe_create_dir(sb.buf, 1);
331 strbuf_reset(&sb);
332
333 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
334 write_file(sb.buf, "this repository uses the reftable format");
335 adjust_shared_perm(sb.buf);
336
337 strbuf_release(&sb);
338 return 0;
339 }
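/*
 * Illustrative summary of what init_db creates, relative to $GIT_DIR:
 *
 *   reftable/    - directory that will hold the reftable stack
 *   HEAD         - stub symref to refs/heads/.invalid so that tooling
 *                  still recognizes the directory as a repository
 *   refs/heads   - regular file; tools unaware of the reftable format
 *                  fail on it instead of writing loose refs
 */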
340
341 struct reftable_ref_iterator {
342 struct ref_iterator base;
343 struct reftable_ref_store *refs;
344 struct reftable_iterator iter;
345 struct reftable_ref_record ref;
346 struct object_id oid;
347
348 const char *prefix;
349 unsigned int flags;
350 int err;
351 };
352
353 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
354 {
355 struct reftable_ref_iterator *iter =
356 (struct reftable_ref_iterator *)ref_iterator;
357 struct reftable_ref_store *refs = iter->refs;
358
359 while (!iter->err) {
360 int flags = 0;
361
362 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
363 if (iter->err)
364 break;
365
366 /*
367 * The files backend only lists references contained in
368 * "refs/". We emulate the same behaviour here and thus skip
369 * all references that don't start with this prefix.
370 */
371 if (!starts_with(iter->ref.refname, "refs/"))
372 continue;
373
374 if (iter->prefix &&
375 strncmp(iter->prefix, iter->ref.refname, strlen(iter->prefix))) {
376 iter->err = 1;
377 break;
378 }
379
380 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
381 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
382 REF_WORKTREE_CURRENT)
383 continue;
384
385 switch (iter->ref.value_type) {
386 case REFTABLE_REF_VAL1:
387 oidread(&iter->oid, iter->ref.value.val1);
388 break;
389 case REFTABLE_REF_VAL2:
390 oidread(&iter->oid, iter->ref.value.val2.value);
391 break;
392 case REFTABLE_REF_SYMREF:
393 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
394 RESOLVE_REF_READING, &iter->oid, &flags))
395 oidclr(&iter->oid);
396 break;
397 default:
398 BUG("unhandled reference value type %d", iter->ref.value_type);
399 }
400
401 if (is_null_oid(&iter->oid))
402 flags |= REF_ISBROKEN;
403
404 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
405 if (!refname_is_safe(iter->ref.refname))
406 die(_("refname is dangerous: %s"), iter->ref.refname);
407 oidclr(&iter->oid);
408 flags |= REF_BAD_NAME | REF_ISBROKEN;
409 }
410
411 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
412 flags & REF_ISSYMREF &&
413 flags & REF_ISBROKEN)
414 continue;
415
416 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
417 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
418 &iter->oid, flags))
419 continue;
420
421 iter->base.refname = iter->ref.refname;
422 iter->base.oid = &iter->oid;
423 iter->base.flags = flags;
424
425 break;
426 }
427
428 if (iter->err > 0) {
429 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
430 return ITER_ERROR;
431 return ITER_DONE;
432 }
433
434 if (iter->err < 0) {
435 ref_iterator_abort(ref_iterator);
436 return ITER_ERROR;
437 }
438
439 return ITER_OK;
440 }
441
442 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
443 struct object_id *peeled)
444 {
445 struct reftable_ref_iterator *iter =
446 (struct reftable_ref_iterator *)ref_iterator;
447
448 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
449 oidread(peeled, iter->ref.value.val2.target_value);
450 return 0;
451 }
452
453 return -1;
454 }
455
456 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
457 {
458 struct reftable_ref_iterator *iter =
459 (struct reftable_ref_iterator *)ref_iterator;
460 reftable_ref_record_release(&iter->ref);
461 reftable_iterator_destroy(&iter->iter);
462 free(iter);
463 return ITER_DONE;
464 }
465
466 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
467 .advance = reftable_ref_iterator_advance,
468 .peel = reftable_ref_iterator_peel,
469 .abort = reftable_ref_iterator_abort
470 };
471
472 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
473 struct reftable_stack *stack,
474 const char *prefix,
475 int flags)
476 {
477 struct reftable_merged_table *merged_table;
478 struct reftable_ref_iterator *iter;
479 int ret;
480
481 iter = xcalloc(1, sizeof(*iter));
482 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
483 iter->prefix = prefix;
484 iter->base.oid = &iter->oid;
485 iter->flags = flags;
486 iter->refs = refs;
487
488 ret = refs->err;
489 if (ret)
490 goto done;
491
492 ret = reftable_stack_reload(stack);
493 if (ret)
494 goto done;
495
496 merged_table = reftable_stack_merged_table(stack);
497
498 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
499 if (ret)
500 goto done;
501
502 done:
503 iter->err = ret;
504 return iter;
505 }
506
507 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
508 const char *prefix,
509 const char **exclude_patterns,
510 unsigned int flags)
511 {
512 struct reftable_ref_iterator *main_iter, *worktree_iter;
513 struct reftable_ref_store *refs;
514 unsigned int required_flags = REF_STORE_READ;
515
516 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
517 required_flags |= REF_STORE_ODB;
518 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
519
520 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
521
522 /*
523 * The worktree stack is only set when we're in an actual worktree
524 * right now. If we aren't, then we only return the common reftable
525 * iterator.
526 */
527 if (!refs->worktree_stack)
528 return &main_iter->base;
529
530 /*
531 * Otherwise we merge both the common and the per-worktree refs into a
532 * single iterator.
533 */
534 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
535 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
536 ref_iterator_select, NULL);
537 }
538
539 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
540 const char *refname,
541 struct object_id *oid,
542 struct strbuf *referent,
543 unsigned int *type,
544 int *failure_errno)
545 {
546 struct reftable_ref_store *refs =
547 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
548 struct reftable_stack *stack = stack_for(refs, refname, &refname);
549 int ret;
550
551 if (refs->err < 0)
552 return refs->err;
553
554 ret = reftable_stack_reload(stack);
555 if (ret)
556 return ret;
557
558 ret = read_ref_without_reload(stack, refname, oid, referent, type);
559 if (ret < 0)
560 return ret;
561 if (ret > 0) {
562 *failure_errno = ENOENT;
563 return -1;
564 }
565
566 return 0;
567 }
568
569 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
570 const char *refname,
571 struct strbuf *referent)
572 {
573 struct reftable_ref_store *refs =
574 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
575 struct reftable_stack *stack = stack_for(refs, refname, &refname);
576 struct reftable_ref_record ref = {0};
577 int ret;
578
579 ret = reftable_stack_reload(stack);
580 if (ret)
581 return ret;
582
583 ret = reftable_stack_read_ref(stack, refname, &ref);
584 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
585 strbuf_addstr(referent, ref.value.symref);
586 else
587 ret = -1;
588
589 reftable_ref_record_release(&ref);
590 return ret;
591 }
592
593 /*
594 * Return the refname under which update was originally requested.
595 */
596 static const char *original_update_refname(struct ref_update *update)
597 {
598 while (update->parent_update)
599 update = update->parent_update;
600 return update->refname;
601 }
602
603 struct reftable_transaction_update {
604 struct ref_update *update;
605 struct object_id current_oid;
606 };
607
608 struct write_transaction_table_arg {
609 struct reftable_ref_store *refs;
610 struct reftable_stack *stack;
611 struct reftable_addition *addition;
612 struct reftable_transaction_update *updates;
613 size_t updates_nr;
614 size_t updates_alloc;
615 size_t updates_expected;
616 };
617
618 struct reftable_transaction_data {
619 struct write_transaction_table_arg *args;
620 size_t args_nr, args_alloc;
621 };
622
623 static void free_transaction_data(struct reftable_transaction_data *tx_data)
624 {
625 if (!tx_data)
626 return;
627 for (size_t i = 0; i < tx_data->args_nr; i++) {
628 reftable_addition_destroy(tx_data->args[i].addition);
629 free(tx_data->args[i].updates);
630 }
631 free(tx_data->args);
632 free(tx_data);
633 }
634
635 /*
636 * Prepare transaction update for the given reference update. This will cause
637 * us to lock the corresponding reftable stack against concurrent modification.
638 */
639 static int prepare_transaction_update(struct write_transaction_table_arg **out,
640 struct reftable_ref_store *refs,
641 struct reftable_transaction_data *tx_data,
642 struct ref_update *update,
643 struct strbuf *err)
644 {
645 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
646 struct write_transaction_table_arg *arg = NULL;
647 size_t i;
648 int ret;
649
650 /*
651 * Search for a preexisting stack update. If there is one then we add
652 * the update to it, otherwise we set up a new stack update.
653 */
654 for (i = 0; !arg && i < tx_data->args_nr; i++)
655 if (tx_data->args[i].stack == stack)
656 arg = &tx_data->args[i];
657
658 if (!arg) {
659 struct reftable_addition *addition;
660
661 ret = reftable_stack_reload(stack);
662 if (ret)
663 return ret;
664
665 ret = reftable_stack_new_addition(&addition, stack);
666 if (ret) {
667 if (ret == REFTABLE_LOCK_ERROR)
668 strbuf_addstr(err, "cannot lock references");
669 return ret;
670 }
671
672 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
673 tx_data->args_alloc);
674 arg = &tx_data->args[tx_data->args_nr++];
675 arg->refs = refs;
676 arg->stack = stack;
677 arg->addition = addition;
678 arg->updates = NULL;
679 arg->updates_nr = 0;
680 arg->updates_alloc = 0;
681 arg->updates_expected = 0;
682 }
683
684 arg->updates_expected++;
685
686 if (out)
687 *out = arg;
688
689 return 0;
690 }
691
692 /*
693 * Queue a reference update for the correct stack. We potentially need to
694 * handle multiple stack updates in a single transaction when it spans across
695 * multiple worktrees.
696 */
697 static int queue_transaction_update(struct reftable_ref_store *refs,
698 struct reftable_transaction_data *tx_data,
699 struct ref_update *update,
700 struct object_id *current_oid,
701 struct strbuf *err)
702 {
703 struct write_transaction_table_arg *arg = NULL;
704 int ret;
705
706 if (update->backend_data)
707 BUG("reference update queued more than once");
708
709 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
710 if (ret < 0)
711 return ret;
712
713 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
714 arg->updates_alloc);
715 arg->updates[arg->updates_nr].update = update;
716 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
717 update->backend_data = &arg->updates[arg->updates_nr++];
718
719 return 0;
720 }
721
722 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
723 struct ref_transaction *transaction,
724 struct strbuf *err)
725 {
726 struct reftable_ref_store *refs =
727 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
728 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
729 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
730 struct reftable_transaction_data *tx_data = NULL;
731 struct object_id head_oid;
732 unsigned int head_type = 0;
733 size_t i;
734 int ret;
735
736 ret = refs->err;
737 if (ret < 0)
738 goto done;
739
740 tx_data = xcalloc(1, sizeof(*tx_data));
741
742 /*
743 * Preprocess all updates. First, we check that there are no duplicate
744 * reference updates in this transaction. Second, we lock all stacks
745 * that will be modified during the transaction.
746 */
747 for (i = 0; i < transaction->nr; i++) {
748 ret = prepare_transaction_update(NULL, refs, tx_data,
749 transaction->updates[i], err);
750 if (ret)
751 goto done;
752
753 string_list_append(&affected_refnames,
754 transaction->updates[i]->refname);
755 }
756
757 /*
758 * Now that we have counted updates per stack we can preallocate their
759 * arrays. This avoids having to reallocate many times.
760 */
761 for (i = 0; i < tx_data->args_nr; i++) {
762 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
763 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
764 }
765
766 /*
767 * Fail if a refname appears more than once in the transaction.
768 * This code is taken from the files backend and is a good candidate to
769 * be moved into the generic layer.
770 */
771 string_list_sort(&affected_refnames);
772 if (ref_update_reject_duplicates(&affected_refnames, err)) {
773 ret = TRANSACTION_GENERIC_ERROR;
774 goto done;
775 }
776
777 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
778 &head_referent, &head_type);
779 if (ret < 0)
780 goto done;
781
782 for (i = 0; i < transaction->nr; i++) {
783 struct ref_update *u = transaction->updates[i];
784 struct object_id current_oid = {0};
785 struct reftable_stack *stack;
786 const char *rewritten_ref;
787
788 stack = stack_for(refs, u->refname, &rewritten_ref);
789
790 /* Verify that the new object ID is valid. */
791 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
792 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
793 !(u->flags & REF_LOG_ONLY)) {
794 struct object *o = parse_object(refs->base.repo, &u->new_oid);
795 if (!o) {
796 strbuf_addf(err,
797 _("trying to write ref '%s' with nonexistent object %s"),
798 u->refname, oid_to_hex(&u->new_oid));
799 ret = -1;
800 goto done;
801 }
802
803 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
804 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
805 oid_to_hex(&u->new_oid), u->refname);
806 ret = -1;
807 goto done;
808 }
809 }
810
811 /*
812 * When we update the reference that HEAD points to we enqueue
813 * a second log-only update for HEAD so that its reflog is
814 * updated accordingly.
815 */
816 if (head_type == REF_ISSYMREF &&
817 !(u->flags & REF_LOG_ONLY) &&
818 !(u->flags & REF_UPDATE_VIA_HEAD) &&
819 !strcmp(rewritten_ref, head_referent.buf)) {
820 struct ref_update *new_update;
821
822 /*
823 * First make sure that HEAD is not already in the
824 * transaction. This check is O(lg N) in the transaction
825 * size, but it happens at most once per transaction.
826 */
827 if (string_list_has_string(&affected_refnames, "HEAD")) {
828 /* An entry already existed */
829 strbuf_addf(err,
830 _("multiple updates for 'HEAD' (including one "
831 "via its referent '%s') are not allowed"),
832 u->refname);
833 ret = TRANSACTION_NAME_CONFLICT;
834 goto done;
835 }
836
837 new_update = ref_transaction_add_update(
838 transaction, "HEAD",
839 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
840 &u->new_oid, &u->old_oid, u->msg);
841 string_list_insert(&affected_refnames, new_update->refname);
842 }
843
844 ret = read_ref_without_reload(stack, rewritten_ref,
845 &current_oid, &referent, &u->type);
846 if (ret < 0)
847 goto done;
848 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
849 /*
850 * The reference does not exist, and we either have no
851 * old object ID or expect the reference to not exist.
852 * We can thus skip the safety checks below as well as the
853 * symref splitting. But we do want to verify that
854 * there is no conflicting reference here so that we
855 * can output a proper error message instead of failing
856 * at a later point.
857 */
858 ret = refs_verify_refname_available(ref_store, u->refname,
859 &affected_refnames, NULL, err);
860 if (ret < 0)
861 goto done;
862
863 /*
864 * There is no need to write the reference deletion
865 * when the reference in question doesn't exist.
866 */
867 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
868 ret = queue_transaction_update(refs, tx_data, u,
869 &current_oid, err);
870 if (ret)
871 goto done;
872 }
873
874 continue;
875 }
876 if (ret > 0) {
877 /* The reference does not exist, but we expected it to. */
878 strbuf_addf(err, _("cannot lock ref '%s': "
879 "unable to resolve reference '%s'"),
880 original_update_refname(u), u->refname);
881 ret = -1;
882 goto done;
883 }
884
885 if (u->type & REF_ISSYMREF) {
886 /*
887 * The reftable stack is locked at this point already,
888 * so it is safe to call `refs_resolve_ref_unsafe()`
889 * here without causing races.
890 */
891 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
892 &current_oid, NULL);
893
894 if (u->flags & REF_NO_DEREF) {
895 if (u->flags & REF_HAVE_OLD && !resolved) {
896 strbuf_addf(err, _("cannot lock ref '%s': "
897 "error reading reference"), u->refname);
898 ret = -1;
899 goto done;
900 }
901 } else {
902 struct ref_update *new_update;
903 int new_flags;
904
905 new_flags = u->flags;
906 if (!strcmp(rewritten_ref, "HEAD"))
907 new_flags |= REF_UPDATE_VIA_HEAD;
908
909 /*
910 * If we are updating a symref (eg. HEAD), we should also
911 * update the branch that the symref points to.
912 *
913 * This is generic functionality, and would be better
914 * done in refs.c, but the current implementation is
915 * intertwined with the locking in files-backend.c.
916 */
917 new_update = ref_transaction_add_update(
918 transaction, referent.buf, new_flags,
919 &u->new_oid, &u->old_oid, u->msg);
920 new_update->parent_update = u;
921
922 /*
923 * Change the symbolic ref update to log only. Also, it
924 * doesn't need to check its old OID value, as that will be
925 * done when new_update is processed.
926 */
927 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
928 u->flags &= ~REF_HAVE_OLD;
929
930 if (string_list_has_string(&affected_refnames, new_update->refname)) {
931 strbuf_addf(err,
932 _("multiple updates for '%s' (including one "
933 "via symref '%s') are not allowed"),
934 referent.buf, u->refname);
935 ret = TRANSACTION_NAME_CONFLICT;
936 goto done;
937 }
938 string_list_insert(&affected_refnames, new_update->refname);
939 }
940 }
941
942 /*
943 * Verify that the old object matches our expectations. Note
944 * that the error messages here do not make a lot of sense in
945 * the context of the reftable backend as we never lock
946 * individual refs. But the error messages match what the files
947 * backend returns, which keeps our tests happy.
948 */
949 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
950 if (is_null_oid(&u->old_oid))
951 strbuf_addf(err, _("cannot lock ref '%s': "
952 "reference already exists"),
953 original_update_refname(u));
954 else if (is_null_oid(&current_oid))
955 strbuf_addf(err, _("cannot lock ref '%s': "
956 "reference is missing but expected %s"),
957 original_update_refname(u),
958 oid_to_hex(&u->old_oid));
959 else
960 strbuf_addf(err, _("cannot lock ref '%s': "
961 "is at %s but expected %s"),
962 original_update_refname(u),
963 oid_to_hex(&current_oid),
964 oid_to_hex(&u->old_oid));
965 ret = -1;
966 goto done;
967 }
968
969 /*
970 * If all of the following conditions are true:
971 *
972 * - We're not about to write a symref.
973 * - We're not about to write a log-only entry.
974 * - The new object ID is unset or matches the current one.
975 *
976 * Then we're essentially doing a no-op update that can be
977 * skipped. This is not only for the sake of efficiency, but
978 * also skips writing unneeded reflog entries.
979 */
980 if ((u->type & REF_ISSYMREF) ||
981 (u->flags & REF_LOG_ONLY) ||
982 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
983 ret = queue_transaction_update(refs, tx_data, u,
984 &current_oid, err);
985 if (ret)
986 goto done;
987 }
988 }
989
990 transaction->backend_data = tx_data;
991 transaction->state = REF_TRANSACTION_PREPARED;
992
993 done:
994 assert(ret != REFTABLE_API_ERROR);
995 if (ret < 0) {
996 free_transaction_data(tx_data);
997 transaction->state = REF_TRANSACTION_CLOSED;
998 if (!err->len)
999 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1000 reftable_error_str(ret));
1001 }
1002 string_list_clear(&affected_refnames, 0);
1003 strbuf_release(&referent);
1004 strbuf_release(&head_referent);
1005
1006 return ret;
1007 }
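/*
 * The function above is not called directly but is driven by the
 * generic ref-transaction API. A minimal caller sketch (hedged; the
 * branch name, object IDs and message are made up) could look like:
 *
 *   struct strbuf err = STRBUF_INIT;
 *   struct ref_transaction *tx = ref_transaction_begin(&err);
 *
 *   if (!tx ||
 *       ref_transaction_update(tx, "refs/heads/topic", &new_oid, &old_oid,
 *                              0, "commit: example", &err) ||
 *       ref_transaction_commit(tx, &err))
 *           die("%s", err.buf);
 *   ref_transaction_free(tx);
 *
 * ref_transaction_commit() in turn invokes the prepare and finish
 * callbacks implemented here.
 */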
1008
1009 static int reftable_be_transaction_abort(struct ref_store *ref_store,
1010 struct ref_transaction *transaction,
1011 struct strbuf *err)
1012 {
1013 struct reftable_transaction_data *tx_data = transaction->backend_data;
1014 free_transaction_data(tx_data);
1015 transaction->state = REF_TRANSACTION_CLOSED;
1016 return 0;
1017 }
1018
1019 static int transaction_update_cmp(const void *a, const void *b)
1020 {
1021 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1022 ((struct reftable_transaction_update *)b)->update->refname);
1023 }
1024
1025 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1026 {
1027 struct write_transaction_table_arg *arg = cb_data;
1028 struct reftable_merged_table *mt =
1029 reftable_stack_merged_table(arg->stack);
1030 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1031 struct reftable_log_record *logs = NULL;
1032 size_t logs_nr = 0, logs_alloc = 0, i;
1033 int ret = 0;
1034
1035 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1036
1037 reftable_writer_set_limits(writer, ts, ts);
1038
1039 for (i = 0; i < arg->updates_nr; i++) {
1040 struct reftable_transaction_update *tx_update = &arg->updates[i];
1041 struct ref_update *u = tx_update->update;
1042
1043 /*
1044 * Write a reflog entry when updating a ref to point to
1045 * something new in either of the following cases:
1046 * something new in any of the following cases:
1047 * - The reference is about to be deleted. We always want to
1048 * delete the reflog in that case.
1049 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1050 * the reflog entry.
1051 * - `core.logAllRefUpdates` tells us to create the reflog for
1052 * the given ref.
1053 */
1054 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1055 struct reftable_log_record log = {0};
1056 struct reftable_iterator it = {0};
1057
1058 /*
1059 * When deleting refs we also delete all reflog entries
1060 * with them. While it is not strictly required to
1061 * delete reflogs together with their refs, this
1062 * matches the behaviour of the files backend.
1063 *
1064 * Unfortunately, we have no better way than to delete
1065 * all reflog entries one by one.
1066 */
1067 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1068 while (ret == 0) {
1069 struct reftable_log_record *tombstone;
1070
1071 ret = reftable_iterator_next_log(&it, &log);
1072 if (ret < 0)
1073 break;
1074 if (ret > 0 || strcmp(log.refname, u->refname)) {
1075 ret = 0;
1076 break;
1077 }
1078
1079 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1080 tombstone = &logs[logs_nr++];
1081 tombstone->refname = xstrdup(u->refname);
1082 tombstone->value_type = REFTABLE_LOG_DELETION;
1083 tombstone->update_index = log.update_index;
1084 }
1085
1086 reftable_log_record_release(&log);
1087 reftable_iterator_destroy(&it);
1088
1089 if (ret)
1090 goto done;
1091 } else if (u->flags & REF_HAVE_NEW &&
1092 (u->flags & REF_FORCE_CREATE_REFLOG ||
1093 should_write_log(&arg->refs->base, u->refname))) {
1094 struct reftable_log_record *log;
1095
1096 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1097 log = &logs[logs_nr++];
1098 memset(log, 0, sizeof(*log));
1099
1100 fill_reftable_log_record(log);
1101 log->update_index = ts;
1102 log->refname = xstrdup(u->refname);
1103 log->value.update.new_hash = u->new_oid.hash;
1104 log->value.update.old_hash = tx_update->current_oid.hash;
1105 log->value.update.message =
1106 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1107 }
1108
1109 if (u->flags & REF_LOG_ONLY)
1110 continue;
1111
1112 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1113 struct reftable_ref_record ref = {
1114 .refname = (char *)u->refname,
1115 .update_index = ts,
1116 .value_type = REFTABLE_REF_DELETION,
1117 };
1118
1119 ret = reftable_writer_add_ref(writer, &ref);
1120 if (ret < 0)
1121 goto done;
1122 } else if (u->flags & REF_HAVE_NEW) {
1123 struct reftable_ref_record ref = {0};
1124 struct object_id peeled;
1125 int peel_error;
1126
1127 ref.refname = (char *)u->refname;
1128 ref.update_index = ts;
1129
1130 peel_error = peel_object(&u->new_oid, &peeled);
1131 if (!peel_error) {
1132 ref.value_type = REFTABLE_REF_VAL2;
1133 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1134 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1135 } else if (!is_null_oid(&u->new_oid)) {
1136 ref.value_type = REFTABLE_REF_VAL1;
1137 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1138 }
1139
1140 ret = reftable_writer_add_ref(writer, &ref);
1141 if (ret < 0)
1142 goto done;
1143 }
1144 }
1145
1146 /*
1147 * Logs are written at the end so that we do not have intermixed ref
1148 * and log blocks.
1149 */
1150 if (logs) {
1151 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1152 if (ret < 0)
1153 goto done;
1154 }
1155
1156 done:
1157 assert(ret != REFTABLE_API_ERROR);
1158 for (i = 0; i < logs_nr; i++)
1159 clear_reftable_log_record(&logs[i]);
1160 free(logs);
1161 return ret;
1162 }
1163
1164 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1165 struct ref_transaction *transaction,
1166 struct strbuf *err)
1167 {
1168 struct reftable_transaction_data *tx_data = transaction->backend_data;
1169 int ret = 0;
1170
1171 for (size_t i = 0; i < tx_data->args_nr; i++) {
1172 ret = reftable_addition_add(tx_data->args[i].addition,
1173 write_transaction_table, &tx_data->args[i]);
1174 if (ret < 0)
1175 goto done;
1176
1177 ret = reftable_addition_commit(tx_data->args[i].addition);
1178 if (ret < 0)
1179 goto done;
1180 }
1181
1182 done:
1183 assert(ret != REFTABLE_API_ERROR);
1184 free_transaction_data(tx_data);
1185 transaction->state = REF_TRANSACTION_CLOSED;
1186
1187 if (ret) {
1188 strbuf_addf(err, _("reftable: transaction failure: %s"),
1189 reftable_error_str(ret));
1190 return -1;
1191 }
1192 return ret;
1193 }
1194
1195 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1196 struct ref_transaction *transaction,
1197 struct strbuf *err)
1198 {
1199 return ref_transaction_commit(transaction, err);
1200 }
1201
1202 static int reftable_be_pack_refs(struct ref_store *ref_store,
1203 struct pack_refs_opts *opts)
1204 {
1205 struct reftable_ref_store *refs =
1206 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1207 struct reftable_stack *stack;
1208 int ret;
1209
1210 if (refs->err)
1211 return refs->err;
1212
1213 stack = refs->worktree_stack;
1214 if (!stack)
1215 stack = refs->main_stack;
1216
1217 ret = reftable_stack_compact_all(stack, NULL);
1218 if (ret)
1219 goto out;
1220 ret = reftable_stack_clean(stack);
1221 if (ret)
1222 goto out;
1223
1224 out:
1225 return ret;
1226 }
1227
1228 struct write_create_symref_arg {
1229 struct reftable_ref_store *refs;
1230 struct reftable_stack *stack;
1231 const char *refname;
1232 const char *target;
1233 const char *logmsg;
1234 };
1235
1236 static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1237 {
1238 struct write_create_symref_arg *create = cb_data;
1239 uint64_t ts = reftable_stack_next_update_index(create->stack);
1240 struct reftable_ref_record ref = {
1241 .refname = (char *)create->refname,
1242 .value_type = REFTABLE_REF_SYMREF,
1243 .value.symref = (char *)create->target,
1244 .update_index = ts,
1245 };
1246 struct reftable_log_record log = {0};
1247 struct object_id new_oid;
1248 struct object_id old_oid;
1249 int ret;
1250
1251 reftable_writer_set_limits(writer, ts, ts);
1252
1253 ret = reftable_writer_add_ref(writer, &ref);
1254 if (ret)
1255 return ret;
1256
1257 /*
1258 * Note that it is important to try and resolve the reference before we
1259 * write the log entry. This is because `should_write_log()` will munge
1260 * `core.logAllRefUpdates`, which is undesirable when we create a new
1261 * repository because it would be written into the config. As HEAD will
1262 * not resolve for new repositories this ordering will ensure that this
1263 * never happens.
1264 */
1265 if (!create->logmsg ||
1266 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1267 RESOLVE_REF_READING, &new_oid, NULL) ||
1268 !should_write_log(&create->refs->base, create->refname))
1269 return 0;
1270
1271 fill_reftable_log_record(&log);
1272 log.refname = xstrdup(create->refname);
1273 log.update_index = ts;
1274 log.value.update.message = xstrndup(create->logmsg,
1275 create->refs->write_options.block_size / 2);
1276 log.value.update.new_hash = new_oid.hash;
1277 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1278 RESOLVE_REF_READING, &old_oid, NULL))
1279 log.value.update.old_hash = old_oid.hash;
1280
1281 ret = reftable_writer_add_log(writer, &log);
1282 clear_reftable_log_record(&log);
1283 return ret;
1284 }
1285
1286 static int reftable_be_create_symref(struct ref_store *ref_store,
1287 const char *refname,
1288 const char *target,
1289 const char *logmsg)
1290 {
1291 struct reftable_ref_store *refs =
1292 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1293 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1294 struct write_create_symref_arg arg = {
1295 .refs = refs,
1296 .stack = stack,
1297 .refname = refname,
1298 .target = target,
1299 .logmsg = logmsg,
1300 };
1301 int ret;
1302
1303 ret = refs->err;
1304 if (ret < 0)
1305 goto done;
1306
1307 ret = reftable_stack_reload(stack);
1308 if (ret)
1309 goto done;
1310
1311 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1312
1313 done:
1314 assert(ret != REFTABLE_API_ERROR);
1315 if (ret)
1316 error("unable to write symref for %s: %s", refname,
1317 reftable_error_str(ret));
1318 return ret;
1319 }
1320
1321 struct write_copy_arg {
1322 struct reftable_ref_store *refs;
1323 struct reftable_stack *stack;
1324 const char *oldname;
1325 const char *newname;
1326 const char *logmsg;
1327 int delete_old;
1328 };
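/*
 * Note: `delete_old` distinguishes the two callers below. Renaming a
 * reference sets it so that the old name gets deleted, whereas copying
 * leaves the old reference and its reflog untouched.
 */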
1329
1330 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1331 {
1332 struct write_copy_arg *arg = cb_data;
1333 uint64_t deletion_ts, creation_ts;
1334 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1335 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1336 struct reftable_log_record old_log = {0}, *logs = NULL;
1337 struct reftable_iterator it = {0};
1338 struct string_list skip = STRING_LIST_INIT_NODUP;
1339 struct strbuf errbuf = STRBUF_INIT;
1340 size_t logs_nr = 0, logs_alloc = 0, i;
1341 int ret;
1342
1343 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1344 ret = error(_("refname %s not found"), arg->oldname);
1345 goto done;
1346 }
1347 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1348 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1349 arg->oldname);
1350 goto done;
1351 }
1352
1353 /*
1354 * There's nothing to do in case the old and new name are the same, so
1355 * we exit early in that case.
1356 */
1357 if (!strcmp(arg->oldname, arg->newname)) {
1358 ret = 0;
1359 goto done;
1360 }
1361
1362 /*
1363 * Verify that the new refname is available.
1364 */
1365 string_list_insert(&skip, arg->oldname);
1366 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1367 NULL, &skip, &errbuf);
1368 if (ret < 0) {
1369 error("%s", errbuf.buf);
1370 goto done;
1371 }
1372
1373 /*
1374 * When deleting the old reference we have to use two update indices:
1375 * once to delete the old ref and its reflog, and once to create the
1376 * new ref and its reflog. They need to be staged with two separate
1377 * indices because the new reflog needs to encode both the deletion of
1378 * the old branch and the creation of the new branch, and we cannot do
1379 * two changes to a reflog in a single update.
1380 */
1381 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1382 if (arg->delete_old)
1383 creation_ts++;
1384 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1385
1386 /*
1387 * Add the new reference. If this is a rename then we also delete the
1388 * old reference.
1389 */
1390 refs[0] = old_ref;
1391 refs[0].refname = (char *)arg->newname;
1392 refs[0].update_index = creation_ts;
1393 if (arg->delete_old) {
1394 refs[1].refname = (char *)arg->oldname;
1395 refs[1].value_type = REFTABLE_REF_DELETION;
1396 refs[1].update_index = deletion_ts;
1397 }
1398 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1399 if (ret < 0)
1400 goto done;
1401
1402 /*
1403 * When deleting the old branch we need to create a reflog entry on the
1404 * new branch name that indicates that the old branch has been deleted
1405 * and then recreated. This is a tad weird, but matches what the files
1406 * backend does.
1407 */
1408 if (arg->delete_old) {
1409 struct strbuf head_referent = STRBUF_INIT;
1410 struct object_id head_oid;
1411 int append_head_reflog;
1412 unsigned head_type = 0;
1413
1414 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1415 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1416 fill_reftable_log_record(&logs[logs_nr]);
1417 logs[logs_nr].refname = (char *)arg->newname;
1418 logs[logs_nr].update_index = deletion_ts;
1419 logs[logs_nr].value.update.message =
1420 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1421 logs[logs_nr].value.update.old_hash = old_ref.value.val1;
1422 logs_nr++;
1423
1424 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1425 if (ret < 0)
1426 goto done;
1427 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1428 strbuf_release(&head_referent);
1429
1430 /*
1431 * The files backend uses `refs_delete_ref()` to delete the old
1432 * branch name, which will append a reflog entry for HEAD in
1433 * case it points to the old branch.
1434 */
1435 if (append_head_reflog) {
1436 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1437 logs[logs_nr] = logs[logs_nr - 1];
1438 logs[logs_nr].refname = "HEAD";
1439 logs_nr++;
1440 }
1441 }
1442
1443 /*
1444 * Create the reflog entry for the newly created branch.
1445 */
1446 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1447 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1448 fill_reftable_log_record(&logs[logs_nr]);
1449 logs[logs_nr].refname = (char *)arg->newname;
1450 logs[logs_nr].update_index = creation_ts;
1451 logs[logs_nr].value.update.message =
1452 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1453 logs[logs_nr].value.update.new_hash = old_ref.value.val1;
1454 logs_nr++;
1455
1456 /*
1457 * In addition to writing the reflog entry for the new branch, we also
1458 * copy over all log entries from the old reflog. Last but not least,
1459 * when renaming we also have to delete all the old reflog entries.
1460 */
1461 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1462 if (ret < 0)
1463 goto done;
1464
1465 while (1) {
1466 ret = reftable_iterator_next_log(&it, &old_log);
1467 if (ret < 0)
1468 goto done;
1469 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1470 ret = 0;
1471 break;
1472 }
1473
1474 free(old_log.refname);
1475
1476 /*
1477 * Copy over the old reflog entry with the new refname.
1478 */
1479 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1480 logs[logs_nr] = old_log;
1481 logs[logs_nr].refname = (char *)arg->newname;
1482 logs_nr++;
1483
1484 /*
1485 * Delete the old reflog entry in case we are renaming.
1486 */
1487 if (arg->delete_old) {
1488 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1489 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1490 logs[logs_nr].refname = (char *)arg->oldname;
1491 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1492 logs[logs_nr].update_index = old_log.update_index;
1493 logs_nr++;
1494 }
1495
1496 /*
1497 * Transfer ownership of the log record we're iterating over to
1498 * the array of log records. Otherwise, the pointers would get
1499 * free'd or reallocated by the iterator.
1500 */
1501 memset(&old_log, 0, sizeof(old_log));
1502 }
1503
1504 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1505 if (ret < 0)
1506 goto done;
1507
1508 done:
1509 assert(ret != REFTABLE_API_ERROR);
1510 reftable_iterator_destroy(&it);
1511 string_list_clear(&skip, 0);
1512 strbuf_release(&errbuf);
1513 for (i = 0; i < logs_nr; i++) {
1514 if (!strcmp(logs[i].refname, "HEAD"))
1515 continue;
1516 if (logs[i].value.update.old_hash == old_ref.value.val1)
1517 logs[i].value.update.old_hash = NULL;
1518 if (logs[i].value.update.new_hash == old_ref.value.val1)
1519 logs[i].value.update.new_hash = NULL;
1520 logs[i].refname = NULL;
1521 reftable_log_record_release(&logs[i]);
1522 }
1523 free(logs);
1524 reftable_ref_record_release(&old_ref);
1525 reftable_log_record_release(&old_log);
1526 return ret;
1527 }
1528
1529 static int reftable_be_rename_ref(struct ref_store *ref_store,
1530 const char *oldrefname,
1531 const char *newrefname,
1532 const char *logmsg)
1533 {
1534 struct reftable_ref_store *refs =
1535 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1536 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1537 struct write_copy_arg arg = {
1538 .refs = refs,
1539 .stack = stack,
1540 .oldname = oldrefname,
1541 .newname = newrefname,
1542 .logmsg = logmsg,
1543 .delete_old = 1,
1544 };
1545 int ret;
1546
1547 ret = refs->err;
1548 if (ret < 0)
1549 goto done;
1550
1551 ret = reftable_stack_reload(stack);
1552 if (ret)
1553 goto done;
1554 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1555
1556 done:
1557 assert(ret != REFTABLE_API_ERROR);
1558 return ret;
1559 }
1560
1561 static int reftable_be_copy_ref(struct ref_store *ref_store,
1562 const char *oldrefname,
1563 const char *newrefname,
1564 const char *logmsg)
1565 {
1566 struct reftable_ref_store *refs =
1567 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1568 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1569 struct write_copy_arg arg = {
1570 .refs = refs,
1571 .stack = stack,
1572 .oldname = oldrefname,
1573 .newname = newrefname,
1574 .logmsg = logmsg,
1575 };
1576 int ret;
1577
1578 ret = refs->err;
1579 if (ret < 0)
1580 goto done;
1581
1582 ret = reftable_stack_reload(stack);
1583 if (ret)
1584 goto done;
1585 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1586
1587 done:
1588 assert(ret != REFTABLE_API_ERROR);
1589 return ret;
1590 }
1591
1592 struct reftable_reflog_iterator {
1593 struct ref_iterator base;
1594 struct reftable_ref_store *refs;
1595 struct reftable_iterator iter;
1596 struct reftable_log_record log;
1597 char *last_name;
1598 int err;
1599 };
1600
1601 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1602 {
1603 struct reftable_reflog_iterator *iter =
1604 (struct reftable_reflog_iterator *)ref_iterator;
1605
1606 while (!iter->err) {
1607 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1608 if (iter->err)
1609 break;
1610
1611 /*
1612 * We want the refnames that we have reflogs for, so we skip if
1613 * we've already produced this name. This could be faster by
1614 * seeking directly to reflog@update_index==0.
1615 */
1616 if (iter->last_name && !strcmp(iter->log.refname, iter->last_name))
1617 continue;
1618
1619 if (check_refname_format(iter->log.refname,
1620 REFNAME_ALLOW_ONELEVEL))
1621 continue;
1622
1623 free(iter->last_name);
1624 iter->last_name = xstrdup(iter->log.refname);
1625 iter->base.refname = iter->log.refname;
1626
1627 break;
1628 }
1629
1630 if (iter->err > 0) {
1631 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1632 return ITER_ERROR;
1633 return ITER_DONE;
1634 }
1635
1636 if (iter->err < 0) {
1637 ref_iterator_abort(ref_iterator);
1638 return ITER_ERROR;
1639 }
1640
1641 return ITER_OK;
1642 }
1643
1644 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1645 struct object_id *peeled)
1646 {
1647 BUG("reftable reflog iterator cannot be peeled");
1648 return -1;
1649 }
1650
1651 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1652 {
1653 struct reftable_reflog_iterator *iter =
1654 (struct reftable_reflog_iterator *)ref_iterator;
1655 reftable_log_record_release(&iter->log);
1656 reftable_iterator_destroy(&iter->iter);
1657 free(iter->last_name);
1658 free(iter);
1659 return ITER_DONE;
1660 }
1661
1662 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1663 .advance = reftable_reflog_iterator_advance,
1664 .peel = reftable_reflog_iterator_peel,
1665 .abort = reftable_reflog_iterator_abort
1666 };
1667
1668 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1669 struct reftable_stack *stack)
1670 {
1671 struct reftable_merged_table *merged_table;
1672 struct reftable_reflog_iterator *iter;
1673 int ret;
1674
1675 iter = xcalloc(1, sizeof(*iter));
1676 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
1677 iter->refs = refs;
1678
1679 ret = refs->err;
1680 if (ret)
1681 goto done;
1682
1683 ret = reftable_stack_reload(stack);
1684 if (ret < 0)
1685 goto done;
1686
1687 merged_table = reftable_stack_merged_table(stack);
1688
1689 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1690 if (ret < 0)
1691 goto done;
1692
1693 done:
1694 iter->err = ret;
1695 return iter;
1696 }
1697
1698 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1699 {
1700 struct reftable_ref_store *refs =
1701 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1702 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1703
1704 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1705 if (!refs->worktree_stack)
1706 return &main_iter->base;
1707
1708 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1709
1710 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
1711 ref_iterator_select, NULL);
1712 }
1713
1714 static int yield_log_record(struct reftable_log_record *log,
1715 each_reflog_ent_fn fn,
1716 void *cb_data)
1717 {
1718 struct object_id old_oid, new_oid;
1719 const char *full_committer;
1720
1721 oidread(&old_oid, log->value.update.old_hash);
1722 oidread(&new_oid, log->value.update.new_hash);
1723
1724 /*
1725 * When both the old object ID and the new object ID are null
1726 * then this is the reflog existence marker. The caller must
1727 * not be aware of it.
1728 */
1729 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1730 return 0;
1731
1732 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1733 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1734 return fn(&old_oid, &new_oid, full_committer,
1735 log->value.update.time, log->value.update.tz_offset,
1736 log->value.update.message, cb_data);
1737 }
1738
1739 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1740 const char *refname,
1741 each_reflog_ent_fn fn,
1742 void *cb_data)
1743 {
1744 struct reftable_ref_store *refs =
1745 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1746 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1747 struct reftable_merged_table *mt = NULL;
1748 struct reftable_log_record log = {0};
1749 struct reftable_iterator it = {0};
1750 int ret;
1751
1752 if (refs->err < 0)
1753 return refs->err;
1754
1755 mt = reftable_stack_merged_table(stack);
1756 ret = reftable_merged_table_seek_log(mt, &it, refname);
1757 while (!ret) {
1758 ret = reftable_iterator_next_log(&it, &log);
1759 if (ret < 0)
1760 break;
1761 if (ret > 0 || strcmp(log.refname, refname)) {
1762 ret = 0;
1763 break;
1764 }
1765
1766 ret = yield_log_record(&log, fn, cb_data);
1767 if (ret)
1768 break;
1769 }
1770
1771 reftable_log_record_release(&log);
1772 reftable_iterator_destroy(&it);
1773 return ret;
1774 }
1775
1776 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1777 const char *refname,
1778 each_reflog_ent_fn fn,
1779 void *cb_data)
1780 {
1781 struct reftable_ref_store *refs =
1782 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1783 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1784 struct reftable_merged_table *mt = NULL;
1785 struct reftable_log_record *logs = NULL;
1786 struct reftable_iterator it = {0};
1787 size_t logs_alloc = 0, logs_nr = 0, i;
1788 int ret;
1789
1790 if (refs->err < 0)
1791 return refs->err;
1792
1793 mt = reftable_stack_merged_table(stack);
1794 ret = reftable_merged_table_seek_log(mt, &it, refname);
1795 while (!ret) {
1796 struct reftable_log_record log = {0};
1797
1798 ret = reftable_iterator_next_log(&it, &log);
1799 if (ret < 0)
1800 goto done;
1801 if (ret > 0 || strcmp(log.refname, refname)) {
1802 reftable_log_record_release(&log);
1803 ret = 0;
1804 break;
1805 }
1806
1807 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1808 logs[logs_nr++] = log;
1809 }
1810
1811 for (i = logs_nr; i--;) {
1812 ret = yield_log_record(&logs[i], fn, cb_data);
1813 if (ret)
1814 goto done;
1815 }
1816
1817 done:
1818 reftable_iterator_destroy(&it);
1819 for (i = 0; i < logs_nr; i++)
1820 reftable_log_record_release(&logs[i]);
1821 free(logs);
1822 return ret;
1823 }
1824
1825 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1826 const char *refname)
1827 {
1828 struct reftable_ref_store *refs =
1829 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1830 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1831 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
1832 struct reftable_log_record log = {0};
1833 struct reftable_iterator it = {0};
1834 int ret;
1835
1836 ret = refs->err;
1837 if (ret < 0)
1838 goto done;
1839
1840 ret = reftable_stack_reload(stack);
1841 if (ret < 0)
1842 goto done;
1843
1844 ret = reftable_merged_table_seek_log(mt, &it, refname);
1845 if (ret < 0)
1846 goto done;
1847
1848 /*
1849 * Check whether we get at least one log record for the given ref name.
1850 * If so, the reflog exists, otherwise it doesn't.
1851 */
1852 ret = reftable_iterator_next_log(&it, &log);
1853 if (ret < 0)
1854 goto done;
1855 if (ret > 0) {
1856 ret = 0;
1857 goto done;
1858 }
1859
1860 ret = strcmp(log.refname, refname) == 0;
1861
1862 done:
1863 reftable_iterator_destroy(&it);
1864 reftable_log_record_release(&log);
1865 if (ret < 0)
1866 ret = 0;
1867 return ret;
1868 }
1869
1870 struct write_reflog_existence_arg {
1871 struct reftable_ref_store *refs;
1872 const char *refname;
1873 struct reftable_stack *stack;
1874 };
1875
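/*
 * Table-writer callback that marks a reflog as existing: if `refname`
 * has no log record yet, write a single placeholder entry at the next
 * update index. When the ref already has log records, or reading them
 * fails, nothing is written.
 */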
1876 static int write_reflog_existence_table(struct reftable_writer *writer,
1877 void *cb_data)
1878 {
1879 struct write_reflog_existence_arg *arg = cb_data;
1880 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1881 struct reftable_log_record log = {0};
1882 int ret;
1883
1884 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1885 if (ret <= 0)
1886 goto done;
1887
1888 reftable_writer_set_limits(writer, ts, ts);
1889
1890 /*
1891 * The existence entry has both old and new object ID set to the
1892 * null object ID. Our iterators are aware of this and will not present
1893 * them to their callers.
1894 */
1895 log.refname = xstrdup(arg->refname);
1896 log.update_index = ts;
1897 log.value_type = REFTABLE_LOG_UPDATE;
1898 ret = reftable_writer_add_log(writer, &log);
1899
1900 done:
1901 assert(ret != REFTABLE_API_ERROR);
1902 reftable_log_record_release(&log);
1903 return ret;
1904 }
1905
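/*
 * "Create" a reflog by appending a table containing the existence
 * placeholder written by write_reflog_existence_table(). This is a
 * no-op when a reflog for `refname` already exists.
 */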
1906 static int reftable_be_create_reflog(struct ref_store *ref_store,
1907 const char *refname,
1908 struct strbuf *errmsg)
1909 {
1910 struct reftable_ref_store *refs =
1911 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1912 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1913 struct write_reflog_existence_arg arg = {
1914 .refs = refs,
1915 .stack = stack,
1916 .refname = refname,
1917 };
1918 int ret;
1919
1920 ret = refs->err;
1921 if (ret < 0)
1922 goto done;
1923
1924 ret = reftable_stack_reload(stack);
1925 if (ret)
1926 goto done;
1927
1928 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1929
1930 done:
1931 return ret;
1932 }
1933
1934 struct write_reflog_delete_arg {
1935 struct reftable_stack *stack;
1936 const char *refname;
1937 };
1938
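/*
 * Table-writer callback that deletes a reflog by writing a deletion
 * tombstone for each of its entries at that entry's update index.
 */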
1939 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1940 {
1941 struct write_reflog_delete_arg *arg = cb_data;
1942 struct reftable_merged_table *mt =
1943 reftable_stack_merged_table(arg->stack);
1944 struct reftable_log_record log = {0}, tombstone = {0};
1945 struct reftable_iterator it = {0};
1946 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1947 int ret;
1948
1949 reftable_writer_set_limits(writer, ts, ts);
1950
1951 /*
1952 * In order to delete a reflog we need to write a deletion tombstone for
1953 * each of its entries one by one. This is inefficient, but the reftable
1954 * format does not have a marker for deleting a whole reflog right now.
1955 */
1956 ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
1957 while (ret == 0) {
1958 ret = reftable_iterator_next_log(&it, &log);
1959 if (ret < 0)
1960 break;
1961 if (ret > 0 || strcmp(log.refname, arg->refname)) {
1962 ret = 0;
1963 break;
1964 }
1965
1966 tombstone.refname = (char *)arg->refname;
1967 tombstone.value_type = REFTABLE_LOG_DELETION;
1968 tombstone.update_index = log.update_index;
1969
1970 ret = reftable_writer_add_log(writer, &tombstone);
1971 }
1972
1973 reftable_log_record_release(&log);
1974 reftable_iterator_destroy(&it);
1975 return ret;
1976 }
1977
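/*
 * Delete the reflog of `refname` by appending a table full of the
 * tombstones produced by write_reflog_delete_table().
 */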
1978 static int reftable_be_delete_reflog(struct ref_store *ref_store,
1979 const char *refname)
1980 {
1981 struct reftable_ref_store *refs =
1982 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1983 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1984 struct write_reflog_delete_arg arg = {
1985 .stack = stack,
1986 .refname = refname,
1987 };
1988 int ret;
1989
1990 ret = reftable_stack_reload(stack);
1991 if (ret)
1992 return ret;
1993 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
1994
1995 assert(ret != REFTABLE_API_ERROR);
1996 return ret;
1997 }
1998
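/*
 * Arguments for write_reflog_expiry_table(): `records` holds the `len`
 * rewritten log records that shall replace the current reflog of
 * `refname`, and `update_oid`, unless it is the null object ID, is the
 * value the ref itself shall be updated to.
 */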
1999 struct reflog_expiry_arg {
2000 struct reftable_stack *stack;
2001 struct reftable_log_record *records;
2002 struct object_id update_oid;
2003 const char *refname;
2004 size_t len;
2005 };
2006
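/*
 * Table-writer callback for reflog expiry. It optionally updates the
 * ref itself to `update_oid`, re-adds the existence placeholder when no
 * live log records remain, and then writes all rewritten records,
 * including the deletion tombstones for pruned entries.
 */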
2007 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2008 {
2009 struct reflog_expiry_arg *arg = cb_data;
2010 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2011 uint64_t live_records = 0;
2012 size_t i;
2013 int ret;
2014
2015 for (i = 0; i < arg->len; i++)
2016 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2017 live_records++;
2018
2019 reftable_writer_set_limits(writer, ts, ts);
2020
2021 if (!is_null_oid(&arg->update_oid)) {
2022 struct reftable_ref_record ref = {0};
2023 struct object_id peeled;
2024
2025 ref.refname = (char *)arg->refname;
2026 ref.update_index = ts;
2027
2028 if (!peel_object(&arg->update_oid, &peeled)) {
2029 ref.value_type = REFTABLE_REF_VAL2;
2030 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2031 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2032 } else {
2033 ref.value_type = REFTABLE_REF_VAL1;
2034 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2035 }
2036
2037 ret = reftable_writer_add_ref(writer, &ref);
2038 if (ret < 0)
2039 return ret;
2040 }
2041
2042 /*
2043 * When there are no more entries left in the reflog we empty it
2044 * completely, but write a placeholder reflog entry that indicates that
2045 * the reflog still exists.
2046 */
2047 if (!live_records) {
2048 struct reftable_log_record log = {
2049 .refname = (char *)arg->refname,
2050 .value_type = REFTABLE_LOG_UPDATE,
2051 .update_index = ts,
2052 };
2053
2054 ret = reftable_writer_add_log(writer, &log);
2055 if (ret)
2056 return ret;
2057 }
2058
2059 for (i = 0; i < arg->len; i++) {
2060 ret = reftable_writer_add_log(writer, &arg->records[i]);
2061 if (ret)
2062 return ret;
2063 }
2064
2065 return 0;
2066 }
2067
2068 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2069 const char *refname,
2070 unsigned int flags,
2071 reflog_expiry_prepare_fn prepare_fn,
2072 reflog_expiry_should_prune_fn should_prune_fn,
2073 reflog_expiry_cleanup_fn cleanup_fn,
2074 void *policy_cb_data)
2075 {
2076 /*
2077 * For log expiry, we write tombstones for every single reflog entry
2078 * that is to be expired. This means that the entries are still
2079 * retrievable by delving into the stack, and expiring entries
2080 * paradoxically takes extra memory. This memory is only reclaimed when
2081 * compacting the reftable stack.
2082 *
2083 * It would be better if the refs backend supported an API that sets a
2084 * criterion for all refs, passing the criterion to pack_refs().
2085 *
2086 * On the plus side, because we do the expiration per ref, we can easily
2087 * insert the reflog existence dummies.
2088 */
2089 struct reftable_ref_store *refs =
2090 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2091 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2092 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
2093 struct reftable_log_record *logs = NULL;
2094 struct reftable_log_record *rewritten = NULL;
2095 struct reftable_ref_record ref_record = {0};
2096 struct reftable_iterator it = {0};
2097 struct reftable_addition *add = NULL;
2098 struct reflog_expiry_arg arg = {0};
2099 struct object_id oid = {0};
2100 uint8_t *last_hash = NULL;
2101 size_t logs_nr = 0, logs_alloc = 0, i;
2102 int ret;
2103
2104 if (refs->err < 0)
2105 return refs->err;
2106
2107 ret = reftable_stack_reload(stack);
2108 if (ret < 0)
2109 goto done;
2110
2111 ret = reftable_merged_table_seek_log(mt, &it, refname);
2112 if (ret < 0)
2113 goto done;
2114
2115 ret = reftable_stack_new_addition(&add, stack);
2116 if (ret < 0)
2117 goto done;
2118
2119 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2120 if (ret < 0)
2121 goto done;
2122 if (reftable_ref_record_val1(&ref_record))
2123 oidread(&oid, reftable_ref_record_val1(&ref_record));
2124 prepare_fn(refname, &oid, policy_cb_data);
2125
2126 while (1) {
2127 struct reftable_log_record log = {0};
2128 struct object_id old_oid, new_oid;
2129
2130 ret = reftable_iterator_next_log(&it, &log);
2131 if (ret < 0)
2132 goto done;
2133 if (ret > 0 || strcmp(log.refname, refname)) {
2134 reftable_log_record_release(&log);
2135 break;
2136 }
2137
2138 oidread(&old_oid, log.value.update.old_hash);
2139 oidread(&new_oid, log.value.update.new_hash);
2140
2141 /*
2142 * Skip over the reflog existence marker. We will add it back
2143 * in when there are no live reflog records.
2144 */
2145 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2146 reftable_log_record_release(&log);
2147 continue;
2148 }
2149
2150 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2151 logs[logs_nr++] = log;
2152 }
2153
2154 /*
2155 * We need to rewrite all reflog entries according to the pruning
2156 * callback function:
2157 *
2158 * - If a reflog entry shall be pruned we mark the record for
2159 * deletion.
2160 *
2161 * - Otherwise we may have to rewrite the chain of reflog entries so
2162 * that gaps created by just-deleted records get backfilled.
2163 */
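/*
 * For example, given the entries A->B, B->C and C->D (oldest to
 * newest), pruning B->C with EXPIRE_REFLOGS_REWRITE set turns the
 * surviving C->D entry into B->D, so that the old object ID of each
 * entry again matches the new object ID of the entry preceding it.
 */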
2164 CALLOC_ARRAY(rewritten, logs_nr);
2165 for (i = logs_nr; i--;) {
2166 struct reftable_log_record *dest = &rewritten[i];
2167 struct object_id old_oid, new_oid;
2168
2169 *dest = logs[i];
2170 oidread(&old_oid, logs[i].value.update.old_hash);
2171 oidread(&new_oid, logs[i].value.update.new_hash);
2172
2173 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2174 (timestamp_t)logs[i].value.update.time,
2175 logs[i].value.update.tz_offset,
2176 logs[i].value.update.message,
2177 policy_cb_data)) {
2178 dest->value_type = REFTABLE_LOG_DELETION;
2179 } else {
2180 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2181 dest->value.update.old_hash = last_hash;
2182 last_hash = logs[i].value.update.new_hash;
2183 }
2184 }
2185
2186 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2187 reftable_ref_record_val1(&ref_record))
2188 oidread(&arg.update_oid, last_hash);
2189
2190 arg.records = rewritten;
2191 arg.len = logs_nr;
2192 arg.stack = stack;
2193 arg.refname = refname;
2194
2195 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2196 if (ret < 0)
2197 goto done;
2198
2199 /*
2200 * Future improvement: we could skip writing records that were
2201 * not changed.
2202 */
2203 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2204 ret = reftable_addition_commit(add);
2205
2206 done:
2207 if (add)
2208 cleanup_fn(policy_cb_data);
2209 assert(ret != REFTABLE_API_ERROR);
2210
2211 reftable_ref_record_release(&ref_record);
2212 reftable_iterator_destroy(&it);
2213 reftable_addition_destroy(add);
2214 for (i = 0; i < logs_nr; i++)
2215 reftable_log_record_release(&logs[i]);
2216 free(logs);
2217 free(rewritten);
2218 return ret;
2219 }
2220
2221 struct ref_storage_be refs_be_reftable = {
2222 .name = "reftable",
2223 .init = reftable_be_init,
2224 .init_db = reftable_be_init_db,
2225 .transaction_prepare = reftable_be_transaction_prepare,
2226 .transaction_finish = reftable_be_transaction_finish,
2227 .transaction_abort = reftable_be_transaction_abort,
2228 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2229
2230 .pack_refs = reftable_be_pack_refs,
2231 .create_symref = reftable_be_create_symref,
2232 .rename_ref = reftable_be_rename_ref,
2233 .copy_ref = reftable_be_copy_ref,
2234
2235 .iterator_begin = reftable_be_iterator_begin,
2236 .read_raw_ref = reftable_be_read_raw_ref,
2237 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2238
2239 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2240 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2241 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2242 .reflog_exists = reftable_be_reflog_exists,
2243 .create_reflog = reftable_be_create_reflog,
2244 .delete_reflog = reftable_be_delete_reflog,
2245 .reflog_expire = reftable_be_reflog_expire,
2246 };
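/*
 * Note: this vtable is what the generic refs code dispatches through
 * when a repository uses the "reftable" ref storage format, e.g. one
 * initialized with `git init --ref-format=reftable`, which records the
 * corresponding extensions.refStorage setting in its config.
 */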