1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../iterator.h"
9 #include "../ident.h"
10 #include "../lockfile.h"
11 #include "../object.h"
12 #include "../path.h"
13 #include "../refs.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
22
23 /*
24 * Used as a flag in ref_update::flags when the ref_update was via an
25 * update to HEAD.
26 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
28
29 struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51 };
52
53 /*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62 {
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76 }
77
78 /*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
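 * For example (hypothetical name): looking up "worktrees/foo/refs/bisect/bad"
 * selects the stack of worktree "foo" and rewrites the reference to its
 * stack-local name "refs/bisect/bad".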
90 */
91 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94 {
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when the worktree references the
118 * current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153 }
154
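/*
 * Decide whether a reflog entry shall be written for the given reference.
 * Under the default LOG_REFS_NORMAL mode this means, for instance, that a
 * branch like "refs/heads/main" (hypothetical name) gets its reflog created
 * automatically via should_autocreate_reflog(), whereas other references
 * only receive entries if their reflog already exists.
 */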
155 static int should_write_log(struct ref_store *refs, const char *refname)
156 {
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172 }
173
174 static void clear_reftable_log_record(struct reftable_log_record *log)
175 {
176 switch (log->value_type) {
177 case REFTABLE_LOG_UPDATE:
178 /*
179 * When we write log records, the hashes are owned by the
180 * caller and thus shouldn't be free'd.
181 */
182 log->value.update.old_hash = NULL;
183 log->value.update.new_hash = NULL;
184 break;
185 case REFTABLE_LOG_DELETION:
186 break;
187 }
188 reftable_log_record_release(log);
189 }
190
191 static void fill_reftable_log_record(struct reftable_log_record *log)
192 {
193 const char *info = git_committer_info(0);
194 struct ident_split split = {0};
195 int sign = 1;
196
197 if (split_ident_line(&split, info, strlen(info)))
198 BUG("failed splitting committer info");
199
200 reftable_log_record_release(log);
201 log->value_type = REFTABLE_LOG_UPDATE;
202 log->value.update.name =
203 xstrndup(split.name_begin, split.name_end - split.name_begin);
204 log->value.update.email =
205 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
206 log->value.update.time = atol(split.date_begin);
207 if (*split.tz_begin == '-') {
208 sign = -1;
209 split.tz_begin++;
210 }
211 if (*split.tz_begin == '+') {
212 sign = 1;
213 split.tz_begin++;
214 }
215
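/*
 * For example, a (hypothetical) committer date of "1700000000 +0230" results
 * in time == 1700000000 and, with the sign handling above, in
 * tz_offset == 230; a zone of "-0800" would yield -800.
 */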
216 log->value.update.tz_offset = sign * atoi(split.tz_begin);
217 }
218
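/*
 * Read a single reference from the given stack without reloading it first.
 * Following the conventions used by its callers below: the result is 0 on
 * success, a positive value when the reference does not exist, and a
 * negative reftable error code on failure.
 */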
219 static int read_ref_without_reload(struct reftable_stack *stack,
220 const char *refname,
221 struct object_id *oid,
222 struct strbuf *referent,
223 unsigned int *type)
224 {
225 struct reftable_ref_record ref = {0};
226 int ret;
227
228 ret = reftable_stack_read_ref(stack, refname, &ref);
229 if (ret)
230 goto done;
231
232 if (ref.value_type == REFTABLE_REF_SYMREF) {
233 strbuf_reset(referent);
234 strbuf_addstr(referent, ref.value.symref);
235 *type |= REF_ISSYMREF;
236 } else if (reftable_ref_record_val1(&ref)) {
237 oidread(oid, reftable_ref_record_val1(&ref));
238 } else {
239 /* We got a tombstone, which should not happen. */
240 BUG("unhandled reference value type %d", ref.value_type);
241 }
242
243 done:
244 assert(ret != REFTABLE_API_ERROR);
245 reftable_ref_record_release(&ref);
246 return ret;
247 }
248
249 static struct ref_store *reftable_be_init(struct repository *repo,
250 const char *gitdir,
251 unsigned int store_flags)
252 {
253 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
254 struct strbuf path = STRBUF_INIT;
255 int is_worktree;
256 mode_t mask;
257
258 mask = umask(0);
259 umask(mask);
260
261 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
262 strmap_init(&refs->worktree_stacks);
263 refs->store_flags = store_flags;
264 refs->write_options.block_size = 4096;
265 refs->write_options.hash_id = repo->hash_algo->format_id;
266 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
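/*
 * For example, with a (hypothetical) umask of 022 this passes
 * 0666 & ~022 == 0644 to calc_shared_perm(), which may adjust the mode
 * further depending on core.sharedRepository.
 */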
267
268 /*
269 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
270 * This stack contains both the shared and the main worktree refs.
271 *
272 * Note that we don't try to resolve the path in case we have a
273 * worktree because `get_common_dir_noenv()` already does it for us.
274 */
275 is_worktree = get_common_dir_noenv(&path, gitdir);
276 if (!is_worktree) {
277 strbuf_reset(&path);
278 strbuf_realpath(&path, gitdir, 0);
279 }
280 strbuf_addstr(&path, "/reftable");
281 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285
286 /*
287 * If we're in a worktree we also need to set up the worktree reftable
288 * stack that is contained in the per-worktree GIT_DIR.
289 *
290 * Ideally, we would also add the stack to our worktree stack map. But
291 * we have no way to figure out the worktree name here and thus can't
292 * do it efficiently.
293 */
294 if (is_worktree) {
295 strbuf_reset(&path);
296 strbuf_addf(&path, "%s/reftable", gitdir);
297
298 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
299 refs->write_options);
300 if (refs->err)
301 goto done;
302 }
303
304 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
305
306 done:
307 assert(refs->err != REFTABLE_API_ERROR);
308 strbuf_release(&path);
309 return &refs->base;
310 }
311
312 static int reftable_be_init_db(struct ref_store *ref_store,
313 int flags UNUSED,
314 struct strbuf *err UNUSED)
315 {
316 struct reftable_ref_store *refs =
317 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
318 struct strbuf sb = STRBUF_INIT;
319
320 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
321 safe_create_dir(sb.buf, 1);
322 strbuf_reset(&sb);
323
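/*
 * The stub HEAD and "refs/heads" files written below are not read by the
 * reftable backend itself; they make tools that do not know about the
 * reftable format fail loudly instead of silently misreading the repository.
 */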
324 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
325 write_file(sb.buf, "ref: refs/heads/.invalid");
326 adjust_shared_perm(sb.buf);
327 strbuf_reset(&sb);
328
329 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
330 safe_create_dir(sb.buf, 1);
331 strbuf_reset(&sb);
332
333 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
334 write_file(sb.buf, "this repository uses the reftable format");
335 adjust_shared_perm(sb.buf);
336
337 strbuf_release(&sb);
338 return 0;
339 }
340
341 struct reftable_ref_iterator {
342 struct ref_iterator base;
343 struct reftable_ref_store *refs;
344 struct reftable_iterator iter;
345 struct reftable_ref_record ref;
346 struct object_id oid;
347
348 const char *prefix;
349 unsigned int flags;
350 int err;
351 };
352
353 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
354 {
355 struct reftable_ref_iterator *iter =
356 (struct reftable_ref_iterator *)ref_iterator;
357 struct reftable_ref_store *refs = iter->refs;
358
359 while (!iter->err) {
360 int flags = 0;
361
362 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
363 if (iter->err)
364 break;
365
366 /*
367 * The files backend only lists references contained in
368 * "refs/". We emulate the same behaviour here and thus skip
369 * all references that don't start with this prefix.
370 */
371 if (!starts_with(iter->ref.refname, "refs/"))
372 continue;
373
374 if (iter->prefix &&
375 strncmp(iter->prefix, iter->ref.refname, strlen(iter->prefix))) {
376 iter->err = 1;
377 break;
378 }
379
380 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
381 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
382 REF_WORKTREE_CURRENT)
383 continue;
384
385 switch (iter->ref.value_type) {
386 case REFTABLE_REF_VAL1:
387 oidread(&iter->oid, iter->ref.value.val1);
388 break;
389 case REFTABLE_REF_VAL2:
390 oidread(&iter->oid, iter->ref.value.val2.value);
391 break;
392 case REFTABLE_REF_SYMREF:
393 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
394 RESOLVE_REF_READING, &iter->oid, &flags))
395 oidclr(&iter->oid);
396 break;
397 default:
398 BUG("unhandled reference value type %d", iter->ref.value_type);
399 }
400
401 if (is_null_oid(&iter->oid))
402 flags |= REF_ISBROKEN;
403
404 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
405 if (!refname_is_safe(iter->ref.refname))
406 die(_("refname is dangerous: %s"), iter->ref.refname);
407 oidclr(&iter->oid);
408 flags |= REF_BAD_NAME | REF_ISBROKEN;
409 }
410
411 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
412 flags & REF_ISSYMREF &&
413 flags & REF_ISBROKEN)
414 continue;
415
416 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
417 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
418 &iter->oid, flags))
419 continue;
420
421 iter->base.refname = iter->ref.refname;
422 iter->base.oid = &iter->oid;
423 iter->base.flags = flags;
424
425 break;
426 }
427
428 if (iter->err > 0) {
429 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
430 return ITER_ERROR;
431 return ITER_DONE;
432 }
433
434 if (iter->err < 0) {
435 ref_iterator_abort(ref_iterator);
436 return ITER_ERROR;
437 }
438
439 return ITER_OK;
440 }
441
442 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
443 struct object_id *peeled)
444 {
445 struct reftable_ref_iterator *iter =
446 (struct reftable_ref_iterator *)ref_iterator;
447
448 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
449 oidread(peeled, iter->ref.value.val2.target_value);
450 return 0;
451 }
452
453 return -1;
454 }
455
456 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
457 {
458 struct reftable_ref_iterator *iter =
459 (struct reftable_ref_iterator *)ref_iterator;
460 reftable_ref_record_release(&iter->ref);
461 reftable_iterator_destroy(&iter->iter);
462 free(iter);
463 return ITER_DONE;
464 }
465
466 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
467 .advance = reftable_ref_iterator_advance,
468 .peel = reftable_ref_iterator_peel,
469 .abort = reftable_ref_iterator_abort
470 };
471
472 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
473 struct reftable_stack *stack,
474 const char *prefix,
475 int flags)
476 {
477 struct reftable_merged_table *merged_table;
478 struct reftable_ref_iterator *iter;
479 int ret;
480
481 iter = xcalloc(1, sizeof(*iter));
482 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable, 1);
483 iter->prefix = prefix;
484 iter->base.oid = &iter->oid;
485 iter->flags = flags;
486 iter->refs = refs;
487
488 ret = refs->err;
489 if (ret)
490 goto done;
491
492 ret = reftable_stack_reload(stack);
493 if (ret)
494 goto done;
495
496 merged_table = reftable_stack_merged_table(stack);
497
498 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
499 if (ret)
500 goto done;
501
502 done:
503 iter->err = ret;
504 return iter;
505 }
506
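/*
 * Decide which of the two iterators to advance. As an illustration with
 * hypothetical positions: when both iterators currently yield "HEAD", the
 * worktree record shadows the common one (ITER_SELECT_0_SKIP_1); when only
 * the common iterator yields the shared ref "refs/heads/main", that record
 * is returned via ITER_SELECT_1.
 */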
507 static enum iterator_selection iterator_select(struct ref_iterator *iter_worktree,
508 struct ref_iterator *iter_common,
509 void *cb_data UNUSED)
510 {
511 if (iter_worktree && !iter_common) {
512 /*
513 * Return the worktree ref if there are no more common refs.
514 */
515 return ITER_SELECT_0;
516 } else if (iter_common) {
517 /*
518 * In case we have pending worktree and common refs we need to
519 * yield them based on their lexicographical order. Worktree
520 * refs that have the same name as common refs shadow the
521 * latter.
522 */
523 if (iter_worktree) {
524 int cmp = strcmp(iter_worktree->refname,
525 iter_common->refname);
526 if (cmp < 0)
527 return ITER_SELECT_0;
528 else if (!cmp)
529 return ITER_SELECT_0_SKIP_1;
530 }
531
532 /*
533 * We now know that the lexicographically-next ref is a common
534 * ref. When the common ref is a shared one we return it.
535 */
536 if (parse_worktree_ref(iter_common->refname, NULL, NULL,
537 NULL) == REF_WORKTREE_SHARED)
538 return ITER_SELECT_1;
539
540 /*
541 * Otherwise, if the common ref is a per-worktree ref we skip
542 * it because it would belong to the main worktree, not ours.
543 */
544 return ITER_SKIP_1;
545 } else {
546 return ITER_DONE;
547 }
548 }
549
550 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
551 const char *prefix,
552 const char **exclude_patterns,
553 unsigned int flags)
554 {
555 struct reftable_ref_iterator *main_iter, *worktree_iter;
556 struct reftable_ref_store *refs;
557 unsigned int required_flags = REF_STORE_READ;
558
559 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
560 required_flags |= REF_STORE_ODB;
561 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
562
563 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
564
565 /*
566 * The worktree stack is only set when we're in an actual worktree
567 * right now. If we aren't, then we return the common reftable
568 * iterator, only.
569 */
570 if (!refs->worktree_stack)
571 return &main_iter->base;
572
573 /*
574 * Otherwise we merge both the common and the per-worktree refs into a
575 * single iterator.
576 */
577 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
578 return merge_ref_iterator_begin(1, &worktree_iter->base, &main_iter->base,
579 iterator_select, NULL);
580 }
581
582 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
583 const char *refname,
584 struct object_id *oid,
585 struct strbuf *referent,
586 unsigned int *type,
587 int *failure_errno)
588 {
589 struct reftable_ref_store *refs =
590 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
591 struct reftable_stack *stack = stack_for(refs, refname, &refname);
592 int ret;
593
594 if (refs->err < 0)
595 return refs->err;
596
597 ret = reftable_stack_reload(stack);
598 if (ret)
599 return ret;
600
601 ret = read_ref_without_reload(stack, refname, oid, referent, type);
602 if (ret < 0)
603 return ret;
604 if (ret > 0) {
605 *failure_errno = ENOENT;
606 return -1;
607 }
608
609 return 0;
610 }
611
612 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
613 const char *refname,
614 struct strbuf *referent)
615 {
616 struct reftable_ref_store *refs =
617 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
618 struct reftable_stack *stack = stack_for(refs, refname, &refname);
619 struct reftable_ref_record ref = {0};
620 int ret;
621
622 ret = reftable_stack_reload(stack);
623 if (ret)
624 return ret;
625
626 ret = reftable_stack_read_ref(stack, refname, &ref);
627 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
628 strbuf_addstr(referent, ref.value.symref);
629 else
630 ret = -1;
631
632 reftable_ref_record_release(&ref);
633 return ret;
634 }
635
636 /*
637 * Return the refname under which the update was originally requested.
638 */
639 static const char *original_update_refname(struct ref_update *update)
640 {
641 while (update->parent_update)
642 update = update->parent_update;
643 return update->refname;
644 }
645
646 struct reftable_transaction_update {
647 struct ref_update *update;
648 struct object_id current_oid;
649 };
650
651 struct write_transaction_table_arg {
652 struct reftable_ref_store *refs;
653 struct reftable_stack *stack;
654 struct reftable_addition *addition;
655 struct reftable_transaction_update *updates;
656 size_t updates_nr;
657 size_t updates_alloc;
658 size_t updates_expected;
659 };
660
661 struct reftable_transaction_data {
662 struct write_transaction_table_arg *args;
663 size_t args_nr, args_alloc;
664 };
665
666 static void free_transaction_data(struct reftable_transaction_data *tx_data)
667 {
668 if (!tx_data)
669 return;
670 for (size_t i = 0; i < tx_data->args_nr; i++) {
671 reftable_addition_destroy(tx_data->args[i].addition);
672 free(tx_data->args[i].updates);
673 }
674 free(tx_data->args);
675 free(tx_data);
676 }
677
678 /*
679 * Prepare transaction update for the given reference update. This will cause
680 * us to lock the corresponding reftable stack against concurrent modification.
681 */
682 static int prepare_transaction_update(struct write_transaction_table_arg **out,
683 struct reftable_ref_store *refs,
684 struct reftable_transaction_data *tx_data,
685 struct ref_update *update,
686 struct strbuf *err)
687 {
688 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
689 struct write_transaction_table_arg *arg = NULL;
690 size_t i;
691 int ret;
692
693 /*
694 * Search for a preexisting stack update. If there is one then we add
695 * the update to it, otherwise we set up a new stack update.
696 */
697 for (i = 0; !arg && i < tx_data->args_nr; i++)
698 if (tx_data->args[i].stack == stack)
699 arg = &tx_data->args[i];
700
701 if (!arg) {
702 struct reftable_addition *addition;
703
704 ret = reftable_stack_reload(stack);
705 if (ret)
706 return ret;
707
708 ret = reftable_stack_new_addition(&addition, stack);
709 if (ret) {
710 if (ret == REFTABLE_LOCK_ERROR)
711 strbuf_addstr(err, "cannot lock references");
712 return ret;
713 }
714
715 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
716 tx_data->args_alloc);
717 arg = &tx_data->args[tx_data->args_nr++];
718 arg->refs = refs;
719 arg->stack = stack;
720 arg->addition = addition;
721 arg->updates = NULL;
722 arg->updates_nr = 0;
723 arg->updates_alloc = 0;
724 arg->updates_expected = 0;
725 }
726
727 arg->updates_expected++;
728
729 if (out)
730 *out = arg;
731
732 return 0;
733 }
734
735 /*
736 * Queue a reference update for the correct stack. We potentially need to
737 * handle multiple stack updates in a single transaction when it spans across
738 * multiple worktrees.
739 */
740 static int queue_transaction_update(struct reftable_ref_store *refs,
741 struct reftable_transaction_data *tx_data,
742 struct ref_update *update,
743 struct object_id *current_oid,
744 struct strbuf *err)
745 {
746 struct write_transaction_table_arg *arg = NULL;
747 int ret;
748
749 if (update->backend_data)
750 BUG("reference update queued more than once");
751
752 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
753 if (ret < 0)
754 return ret;
755
756 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
757 arg->updates_alloc);
758 arg->updates[arg->updates_nr].update = update;
759 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
760 update->backend_data = &arg->updates[arg->updates_nr++];
761
762 return 0;
763 }
764
765 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
766 struct ref_transaction *transaction,
767 struct strbuf *err)
768 {
769 struct reftable_ref_store *refs =
770 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
771 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
772 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
773 struct reftable_transaction_data *tx_data = NULL;
774 struct object_id head_oid;
775 unsigned int head_type = 0;
776 size_t i;
777 int ret;
778
779 ret = refs->err;
780 if (ret < 0)
781 goto done;
782
783 tx_data = xcalloc(1, sizeof(*tx_data));
784
785 /*
786 * Preprocess all updates. First, we check that there are no duplicate
787 * reference updates in this transaction. Second, we lock all stacks
788 * that will be modified during the transaction.
789 */
790 for (i = 0; i < transaction->nr; i++) {
791 ret = prepare_transaction_update(NULL, refs, tx_data,
792 transaction->updates[i], err);
793 if (ret)
794 goto done;
795
796 string_list_append(&affected_refnames,
797 transaction->updates[i]->refname);
798 }
799
800 /*
801 * Now that we have counted updates per stack we can preallocate their
802 * arrays. This avoids having to reallocate many times.
803 */
804 for (i = 0; i < tx_data->args_nr; i++) {
805 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
806 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
807 }
808
809 /*
810 * Fail if a refname appears more than once in the transaction.
811 * This code is taken from the files backend and is a good candidate to
812 * be moved into the generic layer.
813 */
814 string_list_sort(&affected_refnames);
815 if (ref_update_reject_duplicates(&affected_refnames, err)) {
816 ret = TRANSACTION_GENERIC_ERROR;
817 goto done;
818 }
819
820 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
821 &head_referent, &head_type);
822 if (ret < 0)
823 goto done;
824 ret = 0;
825
826 for (i = 0; i < transaction->nr; i++) {
827 struct ref_update *u = transaction->updates[i];
828 struct object_id current_oid = {0};
829 struct reftable_stack *stack;
830 const char *rewritten_ref;
831
832 stack = stack_for(refs, u->refname, &rewritten_ref);
833
834 /* Verify that the new object ID is valid. */
835 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
836 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
837 !(u->flags & REF_LOG_ONLY)) {
838 struct object *o = parse_object(refs->base.repo, &u->new_oid);
839 if (!o) {
840 strbuf_addf(err,
841 _("trying to write ref '%s' with nonexistent object %s"),
842 u->refname, oid_to_hex(&u->new_oid));
843 ret = -1;
844 goto done;
845 }
846
847 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
848 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
849 oid_to_hex(&u->new_oid), u->refname);
850 ret = -1;
851 goto done;
852 }
853 }
854
855 /*
856 * When we update the reference that HEAD points to we enqueue
857 * a second log-only update for HEAD so that its reflog is
858 * updated accordingly.
859 */
860 if (head_type == REF_ISSYMREF &&
861 !(u->flags & REF_LOG_ONLY) &&
862 !(u->flags & REF_UPDATE_VIA_HEAD) &&
863 !strcmp(rewritten_ref, head_referent.buf)) {
864 struct ref_update *new_update;
865
866 /*
867 * First make sure that HEAD is not already in the
868 * transaction. This check is O(lg N) in the transaction
869 * size, but it happens at most once per transaction.
870 */
871 if (string_list_has_string(&affected_refnames, "HEAD")) {
872 /* An entry already existed */
873 strbuf_addf(err,
874 _("multiple updates for 'HEAD' (including one "
875 "via its referent '%s') are not allowed"),
876 u->refname);
877 ret = TRANSACTION_NAME_CONFLICT;
878 goto done;
879 }
880
881 new_update = ref_transaction_add_update(
882 transaction, "HEAD",
883 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
884 &u->new_oid, &u->old_oid, u->msg);
885 string_list_insert(&affected_refnames, new_update->refname);
886 }
887
888 ret = read_ref_without_reload(stack, rewritten_ref,
889 &current_oid, &referent, &u->type);
890 if (ret < 0)
891 goto done;
892 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
893 /*
894 * The reference does not exist, and we either have no
895 * old object ID or expect the reference to not exist.
896 * We can thus skip the safety checks below as well as the
897 * symref splitting. But we do want to verify that
898 * there is no conflicting reference here so that we
899 * can output a proper error message instead of failing
900 * at a later point.
901 */
902 ret = refs_verify_refname_available(ref_store, u->refname,
903 &affected_refnames, NULL, err);
904 if (ret < 0)
905 goto done;
906
907 /*
908 * There is no need to write the reference deletion
909 * when the reference in question doesn't exist.
910 */
911 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
912 ret = queue_transaction_update(refs, tx_data, u,
913 &current_oid, err);
914 if (ret)
915 goto done;
916 }
917
918 continue;
919 }
920 if (ret > 0) {
921 /* The reference does not exist, but we expected it to. */
922 strbuf_addf(err, _("cannot lock ref '%s': "
923 "unable to resolve reference '%s'"),
924 original_update_refname(u), u->refname);
925 ret = -1;
926 goto done;
927 }
928
929 if (u->type & REF_ISSYMREF) {
930 /*
931 * The reftable stack is locked at this point already,
932 * so it is safe to call `refs_resolve_ref_unsafe()`
933 * here without causing races.
934 */
935 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
936 &current_oid, NULL);
937
938 if (u->flags & REF_NO_DEREF) {
939 if (u->flags & REF_HAVE_OLD && !resolved) {
940 strbuf_addf(err, _("cannot lock ref '%s': "
941 "error reading reference"), u->refname);
942 ret = -1;
943 goto done;
944 }
945 } else {
946 struct ref_update *new_update;
947 int new_flags;
948
949 new_flags = u->flags;
950 if (!strcmp(rewritten_ref, "HEAD"))
951 new_flags |= REF_UPDATE_VIA_HEAD;
952
953 /*
954 * If we are updating a symref (eg. HEAD), we should also
955 * update the branch that the symref points to.
956 *
957 * This is generic functionality, and would be better
958 * done in refs.c, but the current implementation is
959 * intertwined with the locking in files-backend.c.
960 */
961 new_update = ref_transaction_add_update(
962 transaction, referent.buf, new_flags,
963 &u->new_oid, &u->old_oid, u->msg);
964 new_update->parent_update = u;
965
966 /*
967 * Change the symbolic ref update to log only. Also, it
968 * doesn't need to check its old OID value, as that will be
969 * done when new_update is processed.
970 */
971 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
972 u->flags &= ~REF_HAVE_OLD;
973
974 if (string_list_has_string(&affected_refnames, new_update->refname)) {
975 strbuf_addf(err,
976 _("multiple updates for '%s' (including one "
977 "via symref '%s') are not allowed"),
978 referent.buf, u->refname);
979 ret = TRANSACTION_NAME_CONFLICT;
980 goto done;
981 }
982 string_list_insert(&affected_refnames, new_update->refname);
983 }
984 }
985
986 /*
987 * Verify that the old object matches our expectations. Note
988 * that the error messages here do not make a lot of sense in
989 * the context of the reftable backend as we never lock
990 * individual refs. But the error messages match what the files
991 * backend returns, which keeps our tests happy.
992 */
993 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
994 if (is_null_oid(&u->old_oid))
995 strbuf_addf(err, _("cannot lock ref '%s': "
996 "reference already exists"),
997 original_update_refname(u));
998 else if (is_null_oid(&current_oid))
999 strbuf_addf(err, _("cannot lock ref '%s': "
1000 "reference is missing but expected %s"),
1001 original_update_refname(u),
1002 oid_to_hex(&u->old_oid));
1003 else
1004 strbuf_addf(err, _("cannot lock ref '%s': "
1005 "is at %s but expected %s"),
1006 original_update_refname(u),
1007 oid_to_hex(&current_oid),
1008 oid_to_hex(&u->old_oid));
1009 ret = -1;
1010 goto done;
1011 }
1012
1013 /*
1014 * If all of the following conditions are true:
1015 *
1016 * - We're not about to write a symref.
1017 * - We're not about to write a log-only entry.
1018 *   - The new object ID equals the reference's current value.
1019 *
1020 * Then we're essentially doing a no-op update that can be
1021 * skipped. This is not only for the sake of efficiency, but
1022 * also skips writing unneeded reflog entries.
1023 */
1024 if ((u->type & REF_ISSYMREF) ||
1025 (u->flags & REF_LOG_ONLY) ||
1026 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
1027 ret = queue_transaction_update(refs, tx_data, u,
1028 &current_oid, err);
1029 if (ret)
1030 goto done;
1031 }
1032 }
1033
1034 transaction->backend_data = tx_data;
1035 transaction->state = REF_TRANSACTION_PREPARED;
1036
1037 done:
1038 assert(ret != REFTABLE_API_ERROR);
1039 if (ret < 0) {
1040 free_transaction_data(tx_data);
1041 transaction->state = REF_TRANSACTION_CLOSED;
1042 if (!err->len)
1043 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1044 reftable_error_str(ret));
1045 }
1046 string_list_clear(&affected_refnames, 0);
1047 strbuf_release(&referent);
1048 strbuf_release(&head_referent);
1049
1050 return ret;
1051 }
1052
1053 static int reftable_be_transaction_abort(struct ref_store *ref_store,
1054 struct ref_transaction *transaction,
1055 struct strbuf *err)
1056 {
1057 struct reftable_transaction_data *tx_data = transaction->backend_data;
1058 free_transaction_data(tx_data);
1059 transaction->state = REF_TRANSACTION_CLOSED;
1060 return 0;
1061 }
1062
1063 static int transaction_update_cmp(const void *a, const void *b)
1064 {
1065 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1066 ((struct reftable_transaction_update *)b)->update->refname);
1067 }
1068
1069 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1070 {
1071 struct write_transaction_table_arg *arg = cb_data;
1072 struct reftable_merged_table *mt =
1073 reftable_stack_merged_table(arg->stack);
1074 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1075 struct reftable_log_record *logs = NULL;
1076 size_t logs_nr = 0, logs_alloc = 0, i;
1077 int ret = 0;
1078
1079 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1080
1081 reftable_writer_set_limits(writer, ts, ts);
1082
1083 for (i = 0; i < arg->updates_nr; i++) {
1084 struct reftable_transaction_update *tx_update = &arg->updates[i];
1085 struct ref_update *u = tx_update->update;
1086
1087 /*
1088 * Write a reflog entry when updating a ref to point to
1089 * something new in any of the following cases:
1090 *
1091 * - The reference is about to be deleted. We always want to
1092 * delete the reflog in that case.
1093 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1094 * the reflog entry.
1095 * - `core.logAllRefUpdates` tells us to create the reflog for
1096 * the given ref.
1097 */
1098 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1099 struct reftable_log_record log = {0};
1100 struct reftable_iterator it = {0};
1101
1102 /*
1103 * When deleting refs we also delete all reflog entries
1104 * with them. While it is not strictly required to
1105 * delete reflogs together with their refs, this
1106 * matches the behaviour of the files backend.
1107 *
1108 * Unfortunately, we have no better way than to delete
1109 * all reflog entries one by one.
1110 */
1111 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1112 while (ret == 0) {
1113 struct reftable_log_record *tombstone;
1114
1115 ret = reftable_iterator_next_log(&it, &log);
1116 if (ret < 0)
1117 break;
1118 if (ret > 0 || strcmp(log.refname, u->refname)) {
1119 ret = 0;
1120 break;
1121 }
1122
1123 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1124 tombstone = &logs[logs_nr++];
1125 tombstone->refname = xstrdup(u->refname);
1126 tombstone->value_type = REFTABLE_LOG_DELETION;
1127 tombstone->update_index = log.update_index;
1128 }
1129
1130 reftable_log_record_release(&log);
1131 reftable_iterator_destroy(&it);
1132
1133 if (ret)
1134 goto done;
1135 } else if (u->flags & REF_HAVE_NEW &&
1136 (u->flags & REF_FORCE_CREATE_REFLOG ||
1137 should_write_log(&arg->refs->base, u->refname))) {
1138 struct reftable_log_record *log;
1139
1140 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1141 log = &logs[logs_nr++];
1142 memset(log, 0, sizeof(*log));
1143
1144 fill_reftable_log_record(log);
1145 log->update_index = ts;
1146 log->refname = xstrdup(u->refname);
1147 log->value.update.new_hash = u->new_oid.hash;
1148 log->value.update.old_hash = tx_update->current_oid.hash;
1149 log->value.update.message =
1150 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1151 }
1152
1153 if (u->flags & REF_LOG_ONLY)
1154 continue;
1155
1156 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1157 struct reftable_ref_record ref = {
1158 .refname = (char *)u->refname,
1159 .update_index = ts,
1160 .value_type = REFTABLE_REF_DELETION,
1161 };
1162
1163 ret = reftable_writer_add_ref(writer, &ref);
1164 if (ret < 0)
1165 goto done;
1166 } else if (u->flags & REF_HAVE_NEW) {
1167 struct reftable_ref_record ref = {0};
1168 struct object_id peeled;
1169 int peel_error;
1170
1171 ref.refname = (char *)u->refname;
1172 ref.update_index = ts;
1173
1174 peel_error = peel_object(&u->new_oid, &peeled);
1175 if (!peel_error) {
1176 ref.value_type = REFTABLE_REF_VAL2;
1177 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1178 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1179 } else if (!is_null_oid(&u->new_oid)) {
1180 ref.value_type = REFTABLE_REF_VAL1;
1181 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1182 }
1183
1184 ret = reftable_writer_add_ref(writer, &ref);
1185 if (ret < 0)
1186 goto done;
1187 }
1188 }
1189
1190 /*
1191 * Logs are written at the end so that we do not have intermixed ref
1192 * and log blocks.
1193 */
1194 if (logs) {
1195 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1196 if (ret < 0)
1197 goto done;
1198 }
1199
1200 done:
1201 assert(ret != REFTABLE_API_ERROR);
1202 for (i = 0; i < logs_nr; i++)
1203 clear_reftable_log_record(&logs[i]);
1204 free(logs);
1205 return ret;
1206 }
1207
1208 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1209 struct ref_transaction *transaction,
1210 struct strbuf *err)
1211 {
1212 struct reftable_transaction_data *tx_data = transaction->backend_data;
1213 int ret = 0;
1214
1215 for (size_t i = 0; i < tx_data->args_nr; i++) {
1216 ret = reftable_addition_add(tx_data->args[i].addition,
1217 write_transaction_table, &tx_data->args[i]);
1218 if (ret < 0)
1219 goto done;
1220
1221 ret = reftable_addition_commit(tx_data->args[i].addition);
1222 if (ret < 0)
1223 goto done;
1224 }
1225
1226 done:
1227 assert(ret != REFTABLE_API_ERROR);
1228 free_transaction_data(tx_data);
1229 transaction->state = REF_TRANSACTION_CLOSED;
1230
1231 if (ret) {
1232 strbuf_addf(err, _("reftable: transaction failure: %s"),
1233 reftable_error_str(ret));
1234 return -1;
1235 }
1236 return ret;
1237 }
1238
1239 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1240 struct ref_transaction *transaction,
1241 struct strbuf *err)
1242 {
1243 return ref_transaction_commit(transaction, err);
1244 }
1245
1246 static int reftable_be_pack_refs(struct ref_store *ref_store,
1247 struct pack_refs_opts *opts)
1248 {
1249 struct reftable_ref_store *refs =
1250 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1251 struct reftable_stack *stack;
1252 int ret;
1253
1254 if (refs->err)
1255 return refs->err;
1256
1257 stack = refs->worktree_stack;
1258 if (!stack)
1259 stack = refs->main_stack;
1260
1261 ret = reftable_stack_compact_all(stack, NULL);
1262 if (ret)
1263 goto out;
1264 ret = reftable_stack_clean(stack);
1265 if (ret)
1266 goto out;
1267
1268 out:
1269 return ret;
1270 }
1271
1272 struct write_create_symref_arg {
1273 struct reftable_ref_store *refs;
1274 struct reftable_stack *stack;
1275 const char *refname;
1276 const char *target;
1277 const char *logmsg;
1278 };
1279
1280 static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1281 {
1282 struct write_create_symref_arg *create = cb_data;
1283 uint64_t ts = reftable_stack_next_update_index(create->stack);
1284 struct reftable_ref_record ref = {
1285 .refname = (char *)create->refname,
1286 .value_type = REFTABLE_REF_SYMREF,
1287 .value.symref = (char *)create->target,
1288 .update_index = ts,
1289 };
1290 struct reftable_log_record log = {0};
1291 struct object_id new_oid;
1292 struct object_id old_oid;
1293 int ret;
1294
1295 reftable_writer_set_limits(writer, ts, ts);
1296
1297 ret = reftable_writer_add_ref(writer, &ref);
1298 if (ret)
1299 return ret;
1300
1301 /*
1302 * Note that it is important to try and resolve the reference before we
1303 * write the log entry. This is because `should_write_log()` will munge
1304 * `core.logAllRefUpdates`, which is undesirable when we create a new
1305 * repository because it would be written into the config. As HEAD will
1306 * not resolve for new repositories this ordering will ensure that this
1307 * never happens.
1308 */
1309 if (!create->logmsg ||
1310 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1311 RESOLVE_REF_READING, &new_oid, NULL) ||
1312 !should_write_log(&create->refs->base, create->refname))
1313 return 0;
1314
1315 fill_reftable_log_record(&log);
1316 log.refname = xstrdup(create->refname);
1317 log.update_index = ts;
1318 log.value.update.message = xstrndup(create->logmsg,
1319 create->refs->write_options.block_size / 2);
1320 log.value.update.new_hash = new_oid.hash;
1321 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1322 RESOLVE_REF_READING, &old_oid, NULL))
1323 log.value.update.old_hash = old_oid.hash;
1324
1325 ret = reftable_writer_add_log(writer, &log);
1326 clear_reftable_log_record(&log);
1327 return ret;
1328 }
1329
1330 static int reftable_be_create_symref(struct ref_store *ref_store,
1331 const char *refname,
1332 const char *target,
1333 const char *logmsg)
1334 {
1335 struct reftable_ref_store *refs =
1336 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1337 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1338 struct write_create_symref_arg arg = {
1339 .refs = refs,
1340 .stack = stack,
1341 .refname = refname,
1342 .target = target,
1343 .logmsg = logmsg,
1344 };
1345 int ret;
1346
1347 ret = refs->err;
1348 if (ret < 0)
1349 goto done;
1350
1351 ret = reftable_stack_reload(stack);
1352 if (ret)
1353 goto done;
1354
1355 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1356
1357 done:
1358 assert(ret != REFTABLE_API_ERROR);
1359 if (ret)
1360 error("unable to write symref for %s: %s", refname,
1361 reftable_error_str(ret));
1362 return ret;
1363 }
1364
1365 struct write_copy_arg {
1366 struct reftable_ref_store *refs;
1367 struct reftable_stack *stack;
1368 const char *oldname;
1369 const char *newname;
1370 const char *logmsg;
1371 int delete_old;
1372 };
1373
1374 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1375 {
1376 struct write_copy_arg *arg = cb_data;
1377 uint64_t deletion_ts, creation_ts;
1378 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1379 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1380 struct reftable_log_record old_log = {0}, *logs = NULL;
1381 struct reftable_iterator it = {0};
1382 struct string_list skip = STRING_LIST_INIT_NODUP;
1383 struct strbuf errbuf = STRBUF_INIT;
1384 size_t logs_nr = 0, logs_alloc = 0, i;
1385 int ret;
1386
1387 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1388 ret = error(_("refname %s not found"), arg->oldname);
1389 goto done;
1390 }
1391 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1392 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1393 arg->oldname);
1394 goto done;
1395 }
1396
1397 /*
1398 * There's nothing to do in case the old and new name are the same, so
1399 * we exit early in that case.
1400 */
1401 if (!strcmp(arg->oldname, arg->newname)) {
1402 ret = 0;
1403 goto done;
1404 }
1405
1406 /*
1407 * Verify that the new refname is available.
1408 */
1409 string_list_insert(&skip, arg->oldname);
1410 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1411 NULL, &skip, &errbuf);
1412 if (ret < 0) {
1413 error("%s", errbuf.buf);
1414 goto done;
1415 }
1416
1417 /*
1418 * When deleting the old reference we have to use two update indices:
1419 * one to delete the old ref and its reflog, and one to create the
1420 * new ref and its reflog. They need to be staged with two separate
1421 * indices because the new reflog needs to encode both the deletion of
1422 * the old branch and the creation of the new branch, and we cannot do
1423 * two changes to a reflog in a single update.
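 * For instance, if the next update index were 5 (hypothetical), the deletion
 * would be staged at index 5 and the re-created reference at index 6.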
1424 */
1425 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1426 if (arg->delete_old)
1427 creation_ts++;
1428 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1429
1430 /*
1431 * Add the new reference. If this is a rename then we also delete the
1432 * old reference.
1433 */
1434 refs[0] = old_ref;
1435 refs[0].refname = (char *)arg->newname;
1436 refs[0].update_index = creation_ts;
1437 if (arg->delete_old) {
1438 refs[1].refname = (char *)arg->oldname;
1439 refs[1].value_type = REFTABLE_REF_DELETION;
1440 refs[1].update_index = deletion_ts;
1441 }
1442 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1443 if (ret < 0)
1444 goto done;
1445
1446 /*
1447 * When deleting the old branch we need to create a reflog entry on the
1448 * new branch name that indicates that the old branch has been deleted
1449 * and then recreated. This is a tad weird, but matches what the files
1450 * backend does.
1451 */
1452 if (arg->delete_old) {
1453 struct strbuf head_referent = STRBUF_INIT;
1454 struct object_id head_oid;
1455 int append_head_reflog;
1456 unsigned head_type = 0;
1457
1458 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1459 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1460 fill_reftable_log_record(&logs[logs_nr]);
1461 logs[logs_nr].refname = (char *)arg->newname;
1462 logs[logs_nr].update_index = deletion_ts;
1463 logs[logs_nr].value.update.message =
1464 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1465 logs[logs_nr].value.update.old_hash = old_ref.value.val1;
1466 logs_nr++;
1467
1468 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1469 if (ret < 0)
1470 goto done;
1471 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1472 strbuf_release(&head_referent);
1473
1474 /*
1475 * The files backend uses `refs_delete_ref()` to delete the old
1476 * branch name, which will append a reflog entry for HEAD in
1477 * case it points to the old branch.
1478 */
1479 if (append_head_reflog) {
1480 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1481 logs[logs_nr] = logs[logs_nr - 1];
1482 logs[logs_nr].refname = "HEAD";
1483 logs_nr++;
1484 }
1485 }
1486
1487 /*
1488 * Create the reflog entry for the newly created branch.
1489 */
1490 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1491 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1492 fill_reftable_log_record(&logs[logs_nr]);
1493 logs[logs_nr].refname = (char *)arg->newname;
1494 logs[logs_nr].update_index = creation_ts;
1495 logs[logs_nr].value.update.message =
1496 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1497 logs[logs_nr].value.update.new_hash = old_ref.value.val1;
1498 logs_nr++;
1499
1500 /*
1501 * In addition to writing the reflog entry for the new branch, we also
1502 * copy over all log entries from the old reflog. Last but not least,
1503 * when renaming we also have to delete all the old reflog entries.
1504 */
1505 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1506 if (ret < 0)
1507 goto done;
1508
1509 while (1) {
1510 ret = reftable_iterator_next_log(&it, &old_log);
1511 if (ret < 0)
1512 goto done;
1513 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1514 ret = 0;
1515 break;
1516 }
1517
1518 free(old_log.refname);
1519
1520 /*
1521 * Copy over the old reflog entry with the new refname.
1522 */
1523 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1524 logs[logs_nr] = old_log;
1525 logs[logs_nr].refname = (char *)arg->newname;
1526 logs_nr++;
1527
1528 /*
1529 * Delete the old reflog entry in case we are renaming.
1530 */
1531 if (arg->delete_old) {
1532 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1533 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1534 logs[logs_nr].refname = (char *)arg->oldname;
1535 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1536 logs[logs_nr].update_index = old_log.update_index;
1537 logs_nr++;
1538 }
1539
1540 /*
1541 * Transfer ownership of the log record we're iterating over to
1542 * the array of log records. Otherwise, the pointers would get
1543 * free'd or reallocated by the iterator.
1544 */
1545 memset(&old_log, 0, sizeof(old_log));
1546 }
1547
1548 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1549 if (ret < 0)
1550 goto done;
1551
1552 done:
1553 assert(ret != REFTABLE_API_ERROR);
1554 reftable_iterator_destroy(&it);
1555 string_list_clear(&skip, 0);
1556 strbuf_release(&errbuf);
1557 for (i = 0; i < logs_nr; i++) {
1558 if (!strcmp(logs[i].refname, "HEAD"))
1559 continue;
1560 if (logs[i].value.update.old_hash == old_ref.value.val1)
1561 logs[i].value.update.old_hash = NULL;
1562 if (logs[i].value.update.new_hash == old_ref.value.val1)
1563 logs[i].value.update.new_hash = NULL;
1564 logs[i].refname = NULL;
1565 reftable_log_record_release(&logs[i]);
1566 }
1567 free(logs);
1568 reftable_ref_record_release(&old_ref);
1569 reftable_log_record_release(&old_log);
1570 return ret;
1571 }
1572
1573 static int reftable_be_rename_ref(struct ref_store *ref_store,
1574 const char *oldrefname,
1575 const char *newrefname,
1576 const char *logmsg)
1577 {
1578 struct reftable_ref_store *refs =
1579 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1580 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1581 struct write_copy_arg arg = {
1582 .refs = refs,
1583 .stack = stack,
1584 .oldname = oldrefname,
1585 .newname = newrefname,
1586 .logmsg = logmsg,
1587 .delete_old = 1,
1588 };
1589 int ret;
1590
1591 ret = refs->err;
1592 if (ret < 0)
1593 goto done;
1594
1595 ret = reftable_stack_reload(stack);
1596 if (ret)
1597 goto done;
1598 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1599
1600 done:
1601 assert(ret != REFTABLE_API_ERROR);
1602 return ret;
1603 }
1604
1605 static int reftable_be_copy_ref(struct ref_store *ref_store,
1606 const char *oldrefname,
1607 const char *newrefname,
1608 const char *logmsg)
1609 {
1610 struct reftable_ref_store *refs =
1611 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1612 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1613 struct write_copy_arg arg = {
1614 .refs = refs,
1615 .stack = stack,
1616 .oldname = oldrefname,
1617 .newname = newrefname,
1618 .logmsg = logmsg,
1619 };
1620 int ret;
1621
1622 ret = refs->err;
1623 if (ret < 0)
1624 goto done;
1625
1626 ret = reftable_stack_reload(stack);
1627 if (ret)
1628 goto done;
1629 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1630
1631 done:
1632 assert(ret != REFTABLE_API_ERROR);
1633 return ret;
1634 }
1635
1636 struct reftable_reflog_iterator {
1637 struct ref_iterator base;
1638 struct reftable_ref_store *refs;
1639 struct reftable_iterator iter;
1640 struct reftable_log_record log;
1641 struct object_id oid;
1642 char *last_name;
1643 int err;
1644 };
1645
1646 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1647 {
1648 struct reftable_reflog_iterator *iter =
1649 (struct reftable_reflog_iterator *)ref_iterator;
1650
1651 while (!iter->err) {
1652 int flags;
1653
1654 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1655 if (iter->err)
1656 break;
1657
1658 /*
1659 * We want the refnames that we have reflogs for, so we skip if
1660 * we've already produced this name. This could be faster by
1661 * seeking directly to reflog@update_index==0.
1662 */
1663 if (iter->last_name && !strcmp(iter->log.refname, iter->last_name))
1664 continue;
1665
1666 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->log.refname,
1667 0, &iter->oid, &flags)) {
1668 error(_("bad ref for %s"), iter->log.refname);
1669 continue;
1670 }
1671
1672 free(iter->last_name);
1673 iter->last_name = xstrdup(iter->log.refname);
1674 iter->base.refname = iter->log.refname;
1675 iter->base.oid = &iter->oid;
1676 iter->base.flags = flags;
1677
1678 break;
1679 }
1680
1681 if (iter->err > 0) {
1682 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1683 return ITER_ERROR;
1684 return ITER_DONE;
1685 }
1686
1687 if (iter->err < 0) {
1688 ref_iterator_abort(ref_iterator);
1689 return ITER_ERROR;
1690 }
1691
1692 return ITER_OK;
1693 }
1694
1695 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1696 struct object_id *peeled)
1697 {
1698 BUG("reftable reflog iterator cannot be peeled");
1699 return -1;
1700 }
1701
1702 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1703 {
1704 struct reftable_reflog_iterator *iter =
1705 (struct reftable_reflog_iterator *)ref_iterator;
1706 reftable_log_record_release(&iter->log);
1707 reftable_iterator_destroy(&iter->iter);
1708 free(iter->last_name);
1709 free(iter);
1710 return ITER_DONE;
1711 }
1712
1713 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1714 .advance = reftable_reflog_iterator_advance,
1715 .peel = reftable_reflog_iterator_peel,
1716 .abort = reftable_reflog_iterator_abort
1717 };
1718
1719 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1720 struct reftable_stack *stack)
1721 {
1722 struct reftable_merged_table *merged_table;
1723 struct reftable_reflog_iterator *iter;
1724 int ret;
1725
1726 iter = xcalloc(1, sizeof(*iter));
1727 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable, 1);
1728 iter->refs = refs;
1729 iter->base.oid = &iter->oid;
1730
1731 ret = refs->err;
1732 if (ret)
1733 goto done;
1734
1735 ret = reftable_stack_reload(refs->main_stack);
1736 if (ret < 0)
1737 goto done;
1738
1739 merged_table = reftable_stack_merged_table(stack);
1740
1741 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1742 if (ret < 0)
1743 goto done;
1744
1745 done:
1746 iter->err = ret;
1747 return iter;
1748 }
1749
1750 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1751 {
1752 struct reftable_ref_store *refs =
1753 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1754 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1755
1756 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1757 if (!refs->worktree_stack)
1758 return &main_iter->base;
1759
1760 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1761
1762 return merge_ref_iterator_begin(1, &worktree_iter->base, &main_iter->base,
1763 iterator_select, NULL);
1764 }
1765
1766 static int yield_log_record(struct reftable_log_record *log,
1767 each_reflog_ent_fn fn,
1768 void *cb_data)
1769 {
1770 struct object_id old_oid, new_oid;
1771 const char *full_committer;
1772
1773 oidread(&old_oid, log->value.update.old_hash);
1774 oidread(&new_oid, log->value.update.new_hash);
1775
1776 /*
1777 * When both the old object ID and the new object ID are null
1778 * then this is the reflog existence marker. The caller must
1779 * not be aware of it.
1780 */
1781 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1782 return 0;
1783
1784 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1785 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1786 return fn(&old_oid, &new_oid, full_committer,
1787 log->value.update.time, log->value.update.tz_offset,
1788 log->value.update.message, cb_data);
1789 }
1790
1791 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1792 const char *refname,
1793 each_reflog_ent_fn fn,
1794 void *cb_data)
1795 {
1796 struct reftable_ref_store *refs =
1797 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1798 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1799 struct reftable_merged_table *mt = NULL;
1800 struct reftable_log_record log = {0};
1801 struct reftable_iterator it = {0};
1802 int ret;
1803
1804 if (refs->err < 0)
1805 return refs->err;
1806
1807 mt = reftable_stack_merged_table(stack);
1808 ret = reftable_merged_table_seek_log(mt, &it, refname);
1809 while (!ret) {
1810 ret = reftable_iterator_next_log(&it, &log);
1811 if (ret < 0)
1812 break;
1813 if (ret > 0 || strcmp(log.refname, refname)) {
1814 ret = 0;
1815 break;
1816 }
1817
1818 ret = yield_log_record(&log, fn, cb_data);
1819 if (ret)
1820 break;
1821 }
1822
1823 reftable_log_record_release(&log);
1824 reftable_iterator_destroy(&it);
1825 return ret;
1826 }
1827
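/*
 * The merged table yields the log records of a reference newest-first. The
 * variant below thus collects all matching entries and then replays them
 * back-to-front so that callers see them in chronological order, whereas the
 * reverse variant above forwards them in iteration order.
 */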
1828 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1829 const char *refname,
1830 each_reflog_ent_fn fn,
1831 void *cb_data)
1832 {
1833 struct reftable_ref_store *refs =
1834 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1835 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1836 struct reftable_merged_table *mt = NULL;
1837 struct reftable_log_record *logs = NULL;
1838 struct reftable_iterator it = {0};
1839 size_t logs_alloc = 0, logs_nr = 0, i;
1840 int ret;
1841
1842 if (refs->err < 0)
1843 return refs->err;
1844
1845 mt = reftable_stack_merged_table(stack);
1846 ret = reftable_merged_table_seek_log(mt, &it, refname);
1847 while (!ret) {
1848 struct reftable_log_record log = {0};
1849
1850 ret = reftable_iterator_next_log(&it, &log);
1851 if (ret < 0)
1852 goto done;
1853 if (ret > 0 || strcmp(log.refname, refname)) {
1854 reftable_log_record_release(&log);
1855 ret = 0;
1856 break;
1857 }
1858
1859 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1860 logs[logs_nr++] = log;
1861 }
1862
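	/*
	 * Reftable yields log records newest-first, but this function must
	 * present them oldest-first. We thus buffered all matching records
	 * above and now replay the array back to front.
	 */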
1863 for (i = logs_nr; i--;) {
1864 ret = yield_log_record(&logs[i], fn, cb_data);
1865 if (ret)
1866 goto done;
1867 }
1868
1869 done:
1870 reftable_iterator_destroy(&it);
1871 for (i = 0; i < logs_nr; i++)
1872 reftable_log_record_release(&logs[i]);
1873 free(logs);
1874 return ret;
1875 }
1876
1877 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1878 const char *refname)
1879 {
1880 struct reftable_ref_store *refs =
1881 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1882 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1883 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
1884 struct reftable_log_record log = {0};
1885 struct reftable_iterator it = {0};
1886 int ret;
1887
1888 ret = refs->err;
1889 if (ret < 0)
1890 goto done;
1891
1892 ret = reftable_stack_reload(stack);
1893 if (ret < 0)
1894 goto done;
1895
1896 ret = reftable_merged_table_seek_log(mt, &it, refname);
1897 if (ret < 0)
1898 goto done;
1899
1900 /*
1901 * Check whether we get at least one log record for the given ref name.
1902 * If so, the reflog exists, otherwise it doesn't.
1903 */
1904 ret = reftable_iterator_next_log(&it, &log);
1905 if (ret < 0)
1906 goto done;
1907 if (ret > 0) {
1908 ret = 0;
1909 goto done;
1910 }
1911
1912 ret = strcmp(log.refname, refname) == 0;
1913
1914 done:
1915 reftable_iterator_destroy(&it);
1916 reftable_log_record_release(&log);
1917 if (ret < 0)
1918 ret = 0;
1919 return ret;
1920 }
1921
1922 struct write_reflog_existence_arg {
1923 struct reftable_ref_store *refs;
1924 const char *refname;
1925 struct reftable_stack *stack;
1926 };
1927
1928 static int write_reflog_existence_table(struct reftable_writer *writer,
1929 void *cb_data)
1930 {
1931 struct write_reflog_existence_arg *arg = cb_data;
1932 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1933 struct reftable_log_record log = {0};
1934 int ret;
1935
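	/*
	 * reftable_stack_read_log() returns a positive value when no log
	 * record exists for the ref; only in that case do we need to write
	 * the existence marker. Zero means a reflog is already present and
	 * negative values are errors, so there is nothing to add either way.
	 */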
1936 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1937 if (ret <= 0)
1938 goto done;
1939
1940 reftable_writer_set_limits(writer, ts, ts);
1941
1942 /*
1943 * The existence entry has both old and new object ID set to the
1944 * null object ID. Our iterators are aware of this marker and will
1945 * not present it to their callers.
1946 */
1947 log.refname = xstrdup(arg->refname);
1948 log.update_index = ts;
1949 log.value_type = REFTABLE_LOG_UPDATE;
1950 ret = reftable_writer_add_log(writer, &log);
1951
1952 done:
1953 assert(ret != REFTABLE_API_ERROR);
1954 reftable_log_record_release(&log);
1955 return ret;
1956 }
1957
1958 static int reftable_be_create_reflog(struct ref_store *ref_store,
1959 const char *refname,
1960 struct strbuf *errmsg)
1961 {
1962 struct reftable_ref_store *refs =
1963 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1964 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1965 struct write_reflog_existence_arg arg = {
1966 .refs = refs,
1967 .stack = stack,
1968 .refname = refname,
1969 };
1970 int ret;
1971
1972 ret = refs->err;
1973 if (ret < 0)
1974 goto done;
1975
1976 ret = reftable_stack_reload(stack);
1977 if (ret)
1978 goto done;
1979
1980 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1981
1982 done:
1983 return ret;
1984 }
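/*
 * Illustrative sketch, not part of the backend: callers normally go through
 * the generic refs API, e.g. to make sure a reflog exists before relying on
 * it. The helper name `ensure_reflog` is hypothetical, and the exact
 * refs_create_reflog() signature should be double-checked against refs.h.
 */
static int ensure_reflog(struct ref_store *refs, const char *refname)
{
	struct strbuf err = STRBUF_INIT;
	int ret = 0;

	if (!refs_reflog_exists(refs, refname))
		ret = refs_create_reflog(refs, refname, &err);
	if (ret)
		error("%s", err.buf);

	strbuf_release(&err);
	return ret;
}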
1985
1986 struct write_reflog_delete_arg {
1987 struct reftable_stack *stack;
1988 const char *refname;
1989 };
1990
1991 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1992 {
1993 struct write_reflog_delete_arg *arg = cb_data;
1994 struct reftable_merged_table *mt =
1995 reftable_stack_merged_table(arg->stack);
1996 struct reftable_log_record log = {0}, tombstone = {0};
1997 struct reftable_iterator it = {0};
1998 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1999 int ret;
2000
2001 reftable_writer_set_limits(writer, ts, ts);
2002
2003 /*
2004 * In order to delete a reflog we need to delete all of its entries one
2005 * by one. This is inefficient, but the reftable format does not have a
2006 * better marker right now.
2007 */
2008 ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
2009 while (ret == 0) {
2010 ret = reftable_iterator_next_log(&it, &log);
2011 if (ret < 0)
2012 break;
2013 if (ret > 0 || strcmp(log.refname, arg->refname)) {
2014 ret = 0;
2015 break;
2016 }
2017
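		/*
		 * Each tombstone reuses the update index of the record it
		 * shadows so that it sorts right next to it in the merged
		 * view and hides it until the stack gets compacted.
		 */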
2018 tombstone.refname = (char *)arg->refname;
2019 tombstone.value_type = REFTABLE_LOG_DELETION;
2020 tombstone.update_index = log.update_index;
2021
2022 ret = reftable_writer_add_log(writer, &tombstone);
2023 }
2024
2025 reftable_log_record_release(&log);
2026 reftable_iterator_destroy(&it);
2027 return ret;
2028 }
2029
2030 static int reftable_be_delete_reflog(struct ref_store *ref_store,
2031 const char *refname)
2032 {
2033 struct reftable_ref_store *refs =
2034 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
2035 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2036 struct write_reflog_delete_arg arg = {
2037 .stack = stack,
2038 .refname = refname,
2039 };
2040 int ret;
2041
2042 ret = reftable_stack_reload(stack);
2043 if (ret)
2044 return ret;
2045 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
2046
2047 assert(ret != REFTABLE_API_ERROR);
2048 return ret;
2049 }
2050
2051 struct reflog_expiry_arg {
2052 struct reftable_stack *stack;
2053 struct reftable_log_record *records;
2054 struct object_id update_oid;
2055 const char *refname;
2056 size_t len;
2057 };
2058
2059 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2060 {
2061 struct reflog_expiry_arg *arg = cb_data;
2062 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2063 uint64_t live_records = 0;
2064 size_t i;
2065 int ret;
2066
2067 for (i = 0; i < arg->len; i++)
2068 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2069 live_records++;
2070
2071 reftable_writer_set_limits(writer, ts, ts);
2072
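	/*
	 * If the caller asked for the ref itself to be rewritten (that is,
	 * arg->update_oid is set), write an updated ref record as well.
	 * Objects that peel (annotated tags) are stored together with their
	 * peeled value (VAL2), everything else as a plain object ID (VAL1).
	 */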
2073 if (!is_null_oid(&arg->update_oid)) {
2074 struct reftable_ref_record ref = {0};
2075 struct object_id peeled;
2076
2077 ref.refname = (char *)arg->refname;
2078 ref.update_index = ts;
2079
2080 if (!peel_object(&arg->update_oid, &peeled)) {
2081 ref.value_type = REFTABLE_REF_VAL2;
2082 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2083 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2084 } else {
2085 ref.value_type = REFTABLE_REF_VAL1;
2086 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2087 }
2088
2089 ret = reftable_writer_add_ref(writer, &ref);
2090 if (ret < 0)
2091 return ret;
2092 }
2093
2094 /*
2095 * When no live entries remain in the reflog it would vanish entirely,
2096 * so we write a placeholder reflog entry that indicates that the
2097 * reflog still exists.
2098 */
2099 if (!live_records) {
2100 struct reftable_log_record log = {
2101 .refname = (char *)arg->refname,
2102 .value_type = REFTABLE_LOG_UPDATE,
2103 .update_index = ts,
2104 };
2105
2106 ret = reftable_writer_add_log(writer, &log);
2107 if (ret)
2108 return ret;
2109 }
2110
2111 for (i = 0; i < arg->len; i++) {
2112 ret = reftable_writer_add_log(writer, &arg->records[i]);
2113 if (ret)
2114 return ret;
2115 }
2116
2117 return 0;
2118 }
2119
2120 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2121 const char *refname,
2122 unsigned int flags,
2123 reflog_expiry_prepare_fn prepare_fn,
2124 reflog_expiry_should_prune_fn should_prune_fn,
2125 reflog_expiry_cleanup_fn cleanup_fn,
2126 void *policy_cb_data)
2127 {
2128 /*
2129 * For log expiry, we write tombstones for every single reflog entry
2130 * that is to be expired. This means that the entries are still
2131 * retrievable by delving into the stack, and expiring entries
2132 * paradoxically takes extra memory. This memory is only reclaimed when
2133 * compacting the reftable stack.
2134 *
2135 * It would be better if the refs backend supported an API that sets a
2136 * criterion for all refs, passing the criterion to pack_refs().
2137 *
2138 * On the plus side, because we do the expiration per ref, we can easily
2139 * insert the reflog existence dummies.
2140 */
2141 struct reftable_ref_store *refs =
2142 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2143 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2144 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
2145 struct reftable_log_record *logs = NULL;
2146 struct reftable_log_record *rewritten = NULL;
2147 struct reftable_ref_record ref_record = {0};
2148 struct reftable_iterator it = {0};
2149 struct reftable_addition *add = NULL;
2150 struct reflog_expiry_arg arg = {0};
2151 struct object_id oid = {0};
2152 uint8_t *last_hash = NULL;
2153 size_t logs_nr = 0, logs_alloc = 0, i;
2154 int ret;
2155
2156 if (refs->err < 0)
2157 return refs->err;
2158
2159 ret = reftable_stack_reload(stack);
2160 if (ret < 0)
2161 goto done;
2162
2163 ret = reftable_merged_table_seek_log(mt, &it, refname);
2164 if (ret < 0)
2165 goto done;
2166
2167 ret = reftable_stack_new_addition(&add, stack);
2168 if (ret < 0)
2169 goto done;
2170
2171 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2172 if (ret < 0)
2173 goto done;
2174 if (reftable_ref_record_val1(&ref_record))
2175 oidread(&oid, reftable_ref_record_val1(&ref_record));
2176 prepare_fn(refname, &oid, policy_cb_data);
2177
2178 while (1) {
2179 struct reftable_log_record log = {0};
2180 struct object_id old_oid, new_oid;
2181
2182 ret = reftable_iterator_next_log(&it, &log);
2183 if (ret < 0)
2184 goto done;
2185 if (ret > 0 || strcmp(log.refname, refname)) {
2186 reftable_log_record_release(&log);
2187 break;
2188 }
2189
2190 oidread(&old_oid, log.value.update.old_hash);
2191 oidread(&new_oid, log.value.update.new_hash);
2192
2193 /*
2194 * Skip over the reflog existence marker. We will add it back
2195 * in when there are no live reflog records.
2196 */
2197 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2198 reftable_log_record_release(&log);
2199 continue;
2200 }
2201
2202 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2203 logs[logs_nr++] = log;
2204 }
2205
2206 /*
2207 * We need to rewrite all reflog entries according to the pruning
2208 * callback function:
2209 *
2210 * - If a reflog entry shall be pruned we mark the record for
2211 * deletion.
2212 *
2213 * - Otherwise we may have to rewrite the chain of reflog entries so
2214 * that gaps created by just-deleted records get backfilled.
2215 */
2216 CALLOC_ARRAY(rewritten, logs_nr);
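	/*
	 * Note that `logs` is in reftable's native newest-first order, so
	 * walking the array back to front processes entries from oldest to
	 * newest. `last_hash` thus tracks the new object ID of the most
	 * recent entry we have decided to keep.
	 */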
2217 for (i = logs_nr; i--;) {
2218 struct reftable_log_record *dest = &rewritten[i];
2219 struct object_id old_oid, new_oid;
2220
2221 *dest = logs[i];
2222 oidread(&old_oid, logs[i].value.update.old_hash);
2223 oidread(&new_oid, logs[i].value.update.new_hash);
2224
2225 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2226 (timestamp_t)logs[i].value.update.time,
2227 logs[i].value.update.tz_offset,
2228 logs[i].value.update.message,
2229 policy_cb_data)) {
2230 dest->value_type = REFTABLE_LOG_DELETION;
2231 } else {
2232 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2233 dest->value.update.old_hash = last_hash;
2234 last_hash = logs[i].value.update.new_hash;
2235 }
2236 }
2237
2238 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2239 reftable_ref_record_val1(&ref_record))
2240 oidread(&arg.update_oid, last_hash);
2241
2242 arg.records = rewritten;
2243 arg.len = logs_nr;
2244 arg.stack = stack;
2245 arg.refname = refname;
2246
2247 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2248 if (ret < 0)
2249 goto done;
2250
2251 /*
2252 * Future improvement: we could skip writing records that were
2253 * not changed.
2254 */
2255 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2256 ret = reftable_addition_commit(add);
2257
2258 done:
2259 if (add)
2260 cleanup_fn(policy_cb_data);
2261 assert(ret != REFTABLE_API_ERROR);
2262
2263 reftable_ref_record_release(&ref_record);
2264 reftable_iterator_destroy(&it);
2265 reftable_addition_destroy(add);
2266 for (i = 0; i < logs_nr; i++)
2267 reftable_log_record_release(&logs[i]);
2268 free(logs);
2269 free(rewritten);
2270 return ret;
2271 }
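/*
 * Illustrative sketch, not part of the backend: the expiry machinery above is
 * driven through the generic refs_reflog_expire() entry point with a set of
 * policy callbacks. The policy below prunes every entry older than a fixed
 * cutoff; the names `noop_prepare`, `prune_older_than`, `noop_cleanup` and
 * `expire_old_entries` are hypothetical, and real callers (e.g.
 * builtin/reflog.c) implement considerably richer policies.
 */
static void noop_prepare(const char *refname UNUSED,
			 const struct object_id *oid UNUSED,
			 void *cb_data UNUSED)
{
}

static int prune_older_than(struct object_id *old_oid UNUSED,
			    struct object_id *new_oid UNUSED,
			    const char *email UNUSED,
			    timestamp_t timestamp, int tz UNUSED,
			    const char *message UNUSED, void *cb_data)
{
	timestamp_t *cutoff = cb_data;
	return timestamp < *cutoff;
}

static void noop_cleanup(void *cb_data UNUSED)
{
}

static int expire_old_entries(struct ref_store *refs, const char *refname,
			      timestamp_t cutoff)
{
	return refs_reflog_expire(refs, refname, EXPIRE_REFLOGS_REWRITE,
				  noop_prepare, prune_older_than,
				  noop_cleanup, &cutoff);
}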
2272
2273 struct ref_storage_be refs_be_reftable = {
2274 .name = "reftable",
2275 .init = reftable_be_init,
2276 .init_db = reftable_be_init_db,
2277 .transaction_prepare = reftable_be_transaction_prepare,
2278 .transaction_finish = reftable_be_transaction_finish,
2279 .transaction_abort = reftable_be_transaction_abort,
2280 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2281
2282 .pack_refs = reftable_be_pack_refs,
2283 .create_symref = reftable_be_create_symref,
2284 .rename_ref = reftable_be_rename_ref,
2285 .copy_ref = reftable_be_copy_ref,
2286
2287 .iterator_begin = reftable_be_iterator_begin,
2288 .read_raw_ref = reftable_be_read_raw_ref,
2289 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2290
2291 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2292 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2293 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2294 .reflog_exists = reftable_be_reflog_exists,
2295 .create_reflog = reftable_be_create_reflog,
2296 .delete_reflog = reftable_be_delete_reflog,
2297 .reflog_expire = reftable_be_reflog_expire,
2298 };