refs/reftable-backend.c
1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../iterator.h"
9 #include "../ident.h"
10 #include "../lockfile.h"
11 #include "../object.h"
12 #include "../path.h"
13 #include "../refs.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
22
23 /*
24 * Used as a flag in ref_update::flags when the ref_update was via an
25 * update to HEAD.
26 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
28
29 struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51 };
52
53 /*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62 {
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76 }
77
78 /*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
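 * For example, the ref `worktrees/foo/refs/heads/bar` would be served from
 * the stack of worktree "foo" under its stack-local name `refs/heads/bar`.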
90 */
91 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94 {
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when such a worktree ref refers to the
118 * current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153 }
154
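/*
 * Decide whether a reflog entry should be written for the given ref, based
 * on the `core.logAllRefUpdates` setting and on whether a reflog for the ref
 * already exists.
 */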
155 static int should_write_log(struct ref_store *refs, const char *refname)
156 {
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172 }
173
174 static void clear_reftable_log_record(struct reftable_log_record *log)
175 {
176 switch (log->value_type) {
177 case REFTABLE_LOG_UPDATE:
178 /*
179 * When we write log records, the hashes are owned by the
180 * caller and thus shouldn't be free'd.
181 */
182 log->value.update.old_hash = NULL;
183 log->value.update.new_hash = NULL;
184 break;
185 case REFTABLE_LOG_DELETION:
186 break;
187 }
188 reftable_log_record_release(log);
189 }
190
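/*
 * Populate the "update" part of the given log record with the current
 * committer name, email, timestamp and timezone offset.
 */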
191 static void fill_reftable_log_record(struct reftable_log_record *log)
192 {
193 const char *info = git_committer_info(0);
194 struct ident_split split = {0};
195 int sign = 1;
196
197 if (split_ident_line(&split, info, strlen(info)))
198 BUG("failed splitting committer info");
199
200 reftable_log_record_release(log);
201 log->value_type = REFTABLE_LOG_UPDATE;
202 log->value.update.name =
203 xstrndup(split.name_begin, split.name_end - split.name_begin);
204 log->value.update.email =
205 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
206 log->value.update.time = atol(split.date_begin);
207 if (*split.tz_begin == '-') {
208 sign = -1;
209 split.tz_begin++;
210 }
211 if (*split.tz_begin == '+') {
212 sign = 1;
213 split.tz_begin++;
214 }
215
216 log->value.update.tz_offset = sign * atoi(split.tz_begin);
217 }
218
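/*
 * Read a single reference from the given stack without reloading it from
 * disk first. Returns 0 on success, a positive value when the reference does
 * not exist, and a negative error code otherwise. Symbolic refs are returned
 * via `referent` and flagged with REF_ISSYMREF in `type`.
 */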
219 static int read_ref_without_reload(struct reftable_stack *stack,
220 const char *refname,
221 struct object_id *oid,
222 struct strbuf *referent,
223 unsigned int *type)
224 {
225 struct reftable_ref_record ref = {0};
226 int ret;
227
228 ret = reftable_stack_read_ref(stack, refname, &ref);
229 if (ret)
230 goto done;
231
232 if (ref.value_type == REFTABLE_REF_SYMREF) {
233 strbuf_reset(referent);
234 strbuf_addstr(referent, ref.value.symref);
235 *type |= REF_ISSYMREF;
236 } else if (reftable_ref_record_val1(&ref)) {
237 oidread(oid, reftable_ref_record_val1(&ref));
238 } else {
239 /* We got a tombstone, which should not happen. */
240 BUG("unhandled reference value type %d", ref.value_type);
241 }
242
243 done:
244 assert(ret != REFTABLE_API_ERROR);
245 reftable_ref_record_release(&ref);
246 return ret;
247 }
248
249 static struct ref_store *reftable_be_init(struct repository *repo,
250 const char *gitdir,
251 unsigned int store_flags)
252 {
253 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
254 struct strbuf path = STRBUF_INIT;
255 int is_worktree;
256 mode_t mask;
257
258 mask = umask(0);
259 umask(mask);
260
261 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
262 strmap_init(&refs->worktree_stacks);
263 refs->store_flags = store_flags;
264 refs->write_options.block_size = 4096;
265 refs->write_options.hash_id = repo->hash_algo->format_id;
266 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
267
268 /*
269 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
270 * This stack contains both the shared and the main worktree refs.
271 *
272 * Note that we don't try to resolve the path in case we have a
273 * worktree because `get_common_dir_noenv()` already does it for us.
274 */
275 is_worktree = get_common_dir_noenv(&path, gitdir);
276 if (!is_worktree) {
277 strbuf_reset(&path);
278 strbuf_realpath(&path, gitdir, 0);
279 }
280 strbuf_addstr(&path, "/reftable");
281 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285
286 /*
287 * If we're in a worktree we also need to set up the worktree reftable
288 * stack that is contained in the per-worktree GIT_DIR.
289 *
290 * Ideally, we would also add the stack to our worktree stack map. But
291 * we have no way to figure out the worktree name here and thus can't
292 * do it efficiently.
293 */
294 if (is_worktree) {
295 strbuf_reset(&path);
296 strbuf_addf(&path, "%s/reftable", gitdir);
297
298 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
299 refs->write_options);
300 if (refs->err)
301 goto done;
302 }
303
304 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
305
306 done:
307 assert(refs->err != REFTABLE_API_ERROR);
308 strbuf_release(&path);
309 return &refs->base;
310 }
311
312 static int reftable_be_init_db(struct ref_store *ref_store,
313 int flags UNUSED,
314 struct strbuf *err UNUSED)
315 {
316 struct reftable_ref_store *refs =
317 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
318 struct strbuf sb = STRBUF_INIT;
319
320 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
321 safe_create_dir(sb.buf, 1);
322 strbuf_reset(&sb);
323
324 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
325 write_file(sb.buf, "ref: refs/heads/.invalid");
326 adjust_shared_perm(sb.buf);
327 strbuf_reset(&sb);
328
329 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
330 safe_create_dir(sb.buf, 1);
331 strbuf_reset(&sb);
332
333 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
334 write_file(sb.buf, "this repository uses the reftable format");
335 adjust_shared_perm(sb.buf);
336
337 strbuf_release(&sb);
338 return 0;
339 }
340
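/*
 * Iterator yielding the references of a single reftable stack in the format
 * expected by the generic ref iterator infrastructure.
 */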
341 struct reftable_ref_iterator {
342 struct ref_iterator base;
343 struct reftable_ref_store *refs;
344 struct reftable_iterator iter;
345 struct reftable_ref_record ref;
346 struct object_id oid;
347
348 const char *prefix;
349 unsigned int flags;
350 int err;
351 };
352
353 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
354 {
355 struct reftable_ref_iterator *iter =
356 (struct reftable_ref_iterator *)ref_iterator;
357 struct reftable_ref_store *refs = iter->refs;
358
359 while (!iter->err) {
360 int flags = 0;
361
362 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
363 if (iter->err)
364 break;
365
366 /*
367 * The files backend only lists references contained in "refs/" unless
368 * the root refs are to be included. We emulate the same behaviour here.
369 */
370 if (!starts_with(iter->ref.refname, "refs/") &&
371 !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
372 (is_pseudoref(&iter->refs->base, iter->ref.refname) ||
373 is_headref(&iter->refs->base, iter->ref.refname)))) {
374 continue;
375 }
376
377 if (iter->prefix &&
378 strncmp(iter->prefix, iter->ref.refname, strlen(iter->prefix))) {
379 iter->err = 1;
380 break;
381 }
382
383 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
384 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
385 REF_WORKTREE_CURRENT)
386 continue;
387
388 switch (iter->ref.value_type) {
389 case REFTABLE_REF_VAL1:
390 oidread(&iter->oid, iter->ref.value.val1);
391 break;
392 case REFTABLE_REF_VAL2:
393 oidread(&iter->oid, iter->ref.value.val2.value);
394 break;
395 case REFTABLE_REF_SYMREF:
396 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
397 RESOLVE_REF_READING, &iter->oid, &flags))
398 oidclr(&iter->oid);
399 break;
400 default:
401 BUG("unhandled reference value type %d", iter->ref.value_type);
402 }
403
404 if (is_null_oid(&iter->oid))
405 flags |= REF_ISBROKEN;
406
407 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
408 if (!refname_is_safe(iter->ref.refname))
409 die(_("refname is dangerous: %s"), iter->ref.refname);
410 oidclr(&iter->oid);
411 flags |= REF_BAD_NAME | REF_ISBROKEN;
412 }
413
414 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
415 flags & REF_ISSYMREF &&
416 flags & REF_ISBROKEN)
417 continue;
418
419 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
420 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
421 &iter->oid, flags))
422 continue;
423
424 iter->base.refname = iter->ref.refname;
425 iter->base.oid = &iter->oid;
426 iter->base.flags = flags;
427
428 break;
429 }
430
431 if (iter->err > 0) {
432 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
433 return ITER_ERROR;
434 return ITER_DONE;
435 }
436
437 if (iter->err < 0) {
438 ref_iterator_abort(ref_iterator);
439 return ITER_ERROR;
440 }
441
442 return ITER_OK;
443 }
444
445 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
446 struct object_id *peeled)
447 {
448 struct reftable_ref_iterator *iter =
449 (struct reftable_ref_iterator *)ref_iterator;
450
451 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
452 oidread(peeled, iter->ref.value.val2.target_value);
453 return 0;
454 }
455
456 return -1;
457 }
458
459 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
460 {
461 struct reftable_ref_iterator *iter =
462 (struct reftable_ref_iterator *)ref_iterator;
463 reftable_ref_record_release(&iter->ref);
464 reftable_iterator_destroy(&iter->iter);
465 free(iter);
466 return ITER_DONE;
467 }
468
469 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
470 .advance = reftable_ref_iterator_advance,
471 .peel = reftable_ref_iterator_peel,
472 .abort = reftable_ref_iterator_abort
473 };
474
475 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
476 struct reftable_stack *stack,
477 const char *prefix,
478 int flags)
479 {
480 struct reftable_merged_table *merged_table;
481 struct reftable_ref_iterator *iter;
482 int ret;
483
484 iter = xcalloc(1, sizeof(*iter));
485 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
486 iter->prefix = prefix;
487 iter->base.oid = &iter->oid;
488 iter->flags = flags;
489 iter->refs = refs;
490
491 ret = refs->err;
492 if (ret)
493 goto done;
494
495 ret = reftable_stack_reload(stack);
496 if (ret)
497 goto done;
498
499 merged_table = reftable_stack_merged_table(stack);
500
501 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
502 if (ret)
503 goto done;
504
505 done:
506 iter->err = ret;
507 return iter;
508 }
509
510 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
511 const char *prefix,
512 const char **exclude_patterns,
513 unsigned int flags)
514 {
515 struct reftable_ref_iterator *main_iter, *worktree_iter;
516 struct reftable_ref_store *refs;
517 unsigned int required_flags = REF_STORE_READ;
518
519 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
520 required_flags |= REF_STORE_ODB;
521 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
522
523 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
524
525 /*
526 * The worktree stack is only set when we're in an actual worktree
527 * right now. If we aren't, then we only return the common reftable
528 * iterator.
529 */
530 if (!refs->worktree_stack)
531 return &main_iter->base;
532
533 /*
534 * Otherwise we merge both the common and the per-worktree refs into a
535 * single iterator.
536 */
537 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
538 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
539 ref_iterator_select, NULL);
540 }
541
542 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
543 const char *refname,
544 struct object_id *oid,
545 struct strbuf *referent,
546 unsigned int *type,
547 int *failure_errno)
548 {
549 struct reftable_ref_store *refs =
550 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
551 struct reftable_stack *stack = stack_for(refs, refname, &refname);
552 int ret;
553
554 if (refs->err < 0)
555 return refs->err;
556
557 ret = reftable_stack_reload(stack);
558 if (ret)
559 return ret;
560
561 ret = read_ref_without_reload(stack, refname, oid, referent, type);
562 if (ret < 0)
563 return ret;
564 if (ret > 0) {
565 *failure_errno = ENOENT;
566 return -1;
567 }
568
569 return 0;
570 }
571
572 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
573 const char *refname,
574 struct strbuf *referent)
575 {
576 struct reftable_ref_store *refs =
577 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
578 struct reftable_stack *stack = stack_for(refs, refname, &refname);
579 struct reftable_ref_record ref = {0};
580 int ret;
581
582 ret = reftable_stack_reload(stack);
583 if (ret)
584 return ret;
585
586 ret = reftable_stack_read_ref(stack, refname, &ref);
587 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
588 strbuf_addstr(referent, ref.value.symref);
589 else
590 ret = -1;
591
592 reftable_ref_record_release(&ref);
593 return ret;
594 }
595
596 /*
597 * Return the refname under which update was originally requested.
598 */
599 static const char *original_update_refname(struct ref_update *update)
600 {
601 while (update->parent_update)
602 update = update->parent_update;
603 return update->refname;
604 }
605
606 struct reftable_transaction_update {
607 struct ref_update *update;
608 struct object_id current_oid;
609 };
610
611 struct write_transaction_table_arg {
612 struct reftable_ref_store *refs;
613 struct reftable_stack *stack;
614 struct reftable_addition *addition;
615 struct reftable_transaction_update *updates;
616 size_t updates_nr;
617 size_t updates_alloc;
618 size_t updates_expected;
619 };
620
621 struct reftable_transaction_data {
622 struct write_transaction_table_arg *args;
623 size_t args_nr, args_alloc;
624 };
625
626 static void free_transaction_data(struct reftable_transaction_data *tx_data)
627 {
628 if (!tx_data)
629 return;
630 for (size_t i = 0; i < tx_data->args_nr; i++) {
631 reftable_addition_destroy(tx_data->args[i].addition);
632 free(tx_data->args[i].updates);
633 }
634 free(tx_data->args);
635 free(tx_data);
636 }
637
638 /*
639 * Prepare transaction update for the given reference update. This will cause
640 * us to lock the corresponding reftable stack against concurrent modification.
641 */
642 static int prepare_transaction_update(struct write_transaction_table_arg **out,
643 struct reftable_ref_store *refs,
644 struct reftable_transaction_data *tx_data,
645 struct ref_update *update,
646 struct strbuf *err)
647 {
648 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
649 struct write_transaction_table_arg *arg = NULL;
650 size_t i;
651 int ret;
652
653 /*
654 * Search for a preexisting stack update. If there is one then we add
655 * the update to it, otherwise we set up a new stack update.
656 */
657 for (i = 0; !arg && i < tx_data->args_nr; i++)
658 if (tx_data->args[i].stack == stack)
659 arg = &tx_data->args[i];
660
661 if (!arg) {
662 struct reftable_addition *addition;
663
664 ret = reftable_stack_reload(stack);
665 if (ret)
666 return ret;
667
668 ret = reftable_stack_new_addition(&addition, stack);
669 if (ret) {
670 if (ret == REFTABLE_LOCK_ERROR)
671 strbuf_addstr(err, "cannot lock references");
672 return ret;
673 }
674
675 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
676 tx_data->args_alloc);
677 arg = &tx_data->args[tx_data->args_nr++];
678 arg->refs = refs;
679 arg->stack = stack;
680 arg->addition = addition;
681 arg->updates = NULL;
682 arg->updates_nr = 0;
683 arg->updates_alloc = 0;
684 arg->updates_expected = 0;
685 }
686
687 arg->updates_expected++;
688
689 if (out)
690 *out = arg;
691
692 return 0;
693 }
694
695 /*
696 * Queue a reference update for the correct stack. We potentially need to
697 * handle multiple stack updates in a single transaction when it spans across
698 * multiple worktrees.
699 */
700 static int queue_transaction_update(struct reftable_ref_store *refs,
701 struct reftable_transaction_data *tx_data,
702 struct ref_update *update,
703 struct object_id *current_oid,
704 struct strbuf *err)
705 {
706 struct write_transaction_table_arg *arg = NULL;
707 int ret;
708
709 if (update->backend_data)
710 BUG("reference update queued more than once");
711
712 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
713 if (ret < 0)
714 return ret;
715
716 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
717 arg->updates_alloc);
718 arg->updates[arg->updates_nr].update = update;
719 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
720 update->backend_data = &arg->updates[arg->updates_nr++];
721
722 return 0;
723 }
724
725 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
726 struct ref_transaction *transaction,
727 struct strbuf *err)
728 {
729 struct reftable_ref_store *refs =
730 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
731 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
732 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
733 struct reftable_transaction_data *tx_data = NULL;
734 struct object_id head_oid;
735 unsigned int head_type = 0;
736 size_t i;
737 int ret;
738
739 ret = refs->err;
740 if (ret < 0)
741 goto done;
742
743 tx_data = xcalloc(1, sizeof(*tx_data));
744
745 /*
746 * Preprocess all updates. First, we check that there are no duplicate
747 * reference updates in this transaction. Second, we lock all stacks
748 * that will be modified during the transaction.
749 */
750 for (i = 0; i < transaction->nr; i++) {
751 ret = prepare_transaction_update(NULL, refs, tx_data,
752 transaction->updates[i], err);
753 if (ret)
754 goto done;
755
756 string_list_append(&affected_refnames,
757 transaction->updates[i]->refname);
758 }
759
760 /*
761 * Now that we have counted updates per stack we can preallocate their
762 * arrays. This avoids having to reallocate many times.
763 */
764 for (i = 0; i < tx_data->args_nr; i++) {
765 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
766 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
767 }
768
769 /*
770 * Fail if a refname appears more than once in the transaction.
771 * This code is taken from the files backend and is a good candidate to
772 * be moved into the generic layer.
773 */
774 string_list_sort(&affected_refnames);
775 if (ref_update_reject_duplicates(&affected_refnames, err)) {
776 ret = TRANSACTION_GENERIC_ERROR;
777 goto done;
778 }
779
780 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
781 &head_referent, &head_type);
782 if (ret < 0)
783 goto done;
784
785 for (i = 0; i < transaction->nr; i++) {
786 struct ref_update *u = transaction->updates[i];
787 struct object_id current_oid = {0};
788 struct reftable_stack *stack;
789 const char *rewritten_ref;
790
791 stack = stack_for(refs, u->refname, &rewritten_ref);
792
793 /* Verify that the new object ID is valid. */
794 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
795 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
796 !(u->flags & REF_LOG_ONLY)) {
797 struct object *o = parse_object(refs->base.repo, &u->new_oid);
798 if (!o) {
799 strbuf_addf(err,
800 _("trying to write ref '%s' with nonexistent object %s"),
801 u->refname, oid_to_hex(&u->new_oid));
802 ret = -1;
803 goto done;
804 }
805
806 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
807 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
808 oid_to_hex(&u->new_oid), u->refname);
809 ret = -1;
810 goto done;
811 }
812 }
813
814 /*
815 * When we update the reference that HEAD points to we enqueue
816 * a second log-only update for HEAD so that its reflog is
817 * updated accordingly.
818 */
819 if (head_type == REF_ISSYMREF &&
820 !(u->flags & REF_LOG_ONLY) &&
821 !(u->flags & REF_UPDATE_VIA_HEAD) &&
822 !strcmp(rewritten_ref, head_referent.buf)) {
823 struct ref_update *new_update;
824
825 /*
826 * First make sure that HEAD is not already in the
827 * transaction. This check is O(lg N) in the transaction
828 * size, but it happens at most once per transaction.
829 */
830 if (string_list_has_string(&affected_refnames, "HEAD")) {
831 /* An entry already existed */
832 strbuf_addf(err,
833 _("multiple updates for 'HEAD' (including one "
834 "via its referent '%s') are not allowed"),
835 u->refname);
836 ret = TRANSACTION_NAME_CONFLICT;
837 goto done;
838 }
839
840 new_update = ref_transaction_add_update(
841 transaction, "HEAD",
842 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
843 &u->new_oid, &u->old_oid, u->msg);
844 string_list_insert(&affected_refnames, new_update->refname);
845 }
846
847 ret = read_ref_without_reload(stack, rewritten_ref,
848 &current_oid, &referent, &u->type);
849 if (ret < 0)
850 goto done;
851 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
852 /*
853 * The reference does not exist, and we either have no
854 * old object ID or expect the reference to not exist.
855 * We can thus skip below safety checks as well as the
856 * symref splitting. But we do want to verify that
857 * there is no conflicting reference here so that we
858 * can output a proper error message instead of failing
859 * at a later point.
860 */
861 ret = refs_verify_refname_available(ref_store, u->refname,
862 &affected_refnames, NULL, err);
863 if (ret < 0)
864 goto done;
865
866 /*
867 * There is no need to write the reference deletion
868 * when the reference in question doesn't exist.
869 */
870 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
871 ret = queue_transaction_update(refs, tx_data, u,
872 &current_oid, err);
873 if (ret)
874 goto done;
875 }
876
877 continue;
878 }
879 if (ret > 0) {
880 /* The reference does not exist, but we expected it to. */
881 strbuf_addf(err, _("cannot lock ref '%s': "
882 "unable to resolve reference '%s'"),
883 original_update_refname(u), u->refname);
884 ret = -1;
885 goto done;
886 }
887
888 if (u->type & REF_ISSYMREF) {
889 /*
890 * The reftable stack is locked at this point already,
891 * so it is safe to call `refs_resolve_ref_unsafe()`
892 * here without causing races.
893 */
894 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
895 &current_oid, NULL);
896
897 if (u->flags & REF_NO_DEREF) {
898 if (u->flags & REF_HAVE_OLD && !resolved) {
899 strbuf_addf(err, _("cannot lock ref '%s': "
900 "error reading reference"), u->refname);
901 ret = -1;
902 goto done;
903 }
904 } else {
905 struct ref_update *new_update;
906 int new_flags;
907
908 new_flags = u->flags;
909 if (!strcmp(rewritten_ref, "HEAD"))
910 new_flags |= REF_UPDATE_VIA_HEAD;
911
912 /*
913 * If we are updating a symref (eg. HEAD), we should also
914 * update the branch that the symref points to.
915 *
916 * This is generic functionality, and would be better
917 * done in refs.c, but the current implementation is
918 * intertwined with the locking in files-backend.c.
919 */
920 new_update = ref_transaction_add_update(
921 transaction, referent.buf, new_flags,
922 &u->new_oid, &u->old_oid, u->msg);
923 new_update->parent_update = u;
924
925 /*
926 * Change the symbolic ref update to log only. Also, it
927 * doesn't need to check its old OID value, as that will be
928 * done when new_update is processed.
929 */
930 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
931 u->flags &= ~REF_HAVE_OLD;
932
933 if (string_list_has_string(&affected_refnames, new_update->refname)) {
934 strbuf_addf(err,
935 _("multiple updates for '%s' (including one "
936 "via symref '%s') are not allowed"),
937 referent.buf, u->refname);
938 ret = TRANSACTION_NAME_CONFLICT;
939 goto done;
940 }
941 string_list_insert(&affected_refnames, new_update->refname);
942 }
943 }
944
945 /*
946 * Verify that the old object matches our expectations. Note
947 * that the error messages here do not make a lot of sense in
948 * the context of the reftable backend as we never lock
949 * individual refs. But the error messages match what the files
950 * backend returns, which keeps our tests happy.
951 */
952 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
953 if (is_null_oid(&u->old_oid))
954 strbuf_addf(err, _("cannot lock ref '%s': "
955 "reference already exists"),
956 original_update_refname(u));
957 else if (is_null_oid(&current_oid))
958 strbuf_addf(err, _("cannot lock ref '%s': "
959 "reference is missing but expected %s"),
960 original_update_refname(u),
961 oid_to_hex(&u->old_oid));
962 else
963 strbuf_addf(err, _("cannot lock ref '%s': "
964 "is at %s but expected %s"),
965 original_update_refname(u),
966 oid_to_hex(&current_oid),
967 oid_to_hex(&u->old_oid));
968 ret = -1;
969 goto done;
970 }
971
972 /*
973 * If all of the following conditions are true:
974 *
975 * - We're not about to write a symref.
976 * - We're not about to write a log-only entry.
977 * - Old and new object ID are the same.
978 *
979 * Then we're essentially doing a no-op update that can be
980 * skipped. This is not only for the sake of efficiency, but
981 * also skips writing unneeded reflog entries.
982 */
983 if ((u->type & REF_ISSYMREF) ||
984 (u->flags & REF_LOG_ONLY) ||
985 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
986 ret = queue_transaction_update(refs, tx_data, u,
987 &current_oid, err);
988 if (ret)
989 goto done;
990 }
991 }
992
993 transaction->backend_data = tx_data;
994 transaction->state = REF_TRANSACTION_PREPARED;
995
996 done:
997 assert(ret != REFTABLE_API_ERROR);
998 if (ret < 0) {
999 free_transaction_data(tx_data);
1000 transaction->state = REF_TRANSACTION_CLOSED;
1001 if (!err->len)
1002 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1003 reftable_error_str(ret));
1004 }
1005 string_list_clear(&affected_refnames, 0);
1006 strbuf_release(&referent);
1007 strbuf_release(&head_referent);
1008
1009 return ret;
1010 }
1011
1012 static int reftable_be_transaction_abort(struct ref_store *ref_store,
1013 struct ref_transaction *transaction,
1014 struct strbuf *err)
1015 {
1016 struct reftable_transaction_data *tx_data = transaction->backend_data;
1017 free_transaction_data(tx_data);
1018 transaction->state = REF_TRANSACTION_CLOSED;
1019 return 0;
1020 }
1021
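/*
 * Order transaction updates by refname so that ref records can be written to
 * the new table in the sorted order expected by the reftable writer.
 */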
1022 static int transaction_update_cmp(const void *a, const void *b)
1023 {
1024 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1025 ((struct reftable_transaction_update *)b)->update->refname);
1026 }
1027
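/*
 * Callback invoked via `reftable_addition_add()`: writes a new table that
 * contains all queued ref updates of a single stack together with their
 * reflog entries.
 */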
1028 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1029 {
1030 struct write_transaction_table_arg *arg = cb_data;
1031 struct reftable_merged_table *mt =
1032 reftable_stack_merged_table(arg->stack);
1033 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1034 struct reftable_log_record *logs = NULL;
1035 size_t logs_nr = 0, logs_alloc = 0, i;
1036 int ret = 0;
1037
1038 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1039
1040 reftable_writer_set_limits(writer, ts, ts);
1041
1042 for (i = 0; i < arg->updates_nr; i++) {
1043 struct reftable_transaction_update *tx_update = &arg->updates[i];
1044 struct ref_update *u = tx_update->update;
1045
1046 /*
1047 * Write a reflog entry when updating a ref to point to
1048 * something new in any of the following cases:
1049 *
1050 * - The reference is about to be deleted. We always want to
1051 * delete the reflog in that case.
1052 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1053 * the reflog entry.
1054 * - `core.logAllRefUpdates` tells us to create the reflog for
1055 * the given ref.
1056 */
1057 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1058 struct reftable_log_record log = {0};
1059 struct reftable_iterator it = {0};
1060
1061 /*
1062 * When deleting refs we also delete all reflog entries
1063 * with them. While it is not strictly required to
1064 * delete reflogs together with their refs, this
1065 * matches the behaviour of the files backend.
1066 *
1067 * Unfortunately, we have no better way than to delete
1068 * all reflog entries one by one.
1069 */
1070 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1071 while (ret == 0) {
1072 struct reftable_log_record *tombstone;
1073
1074 ret = reftable_iterator_next_log(&it, &log);
1075 if (ret < 0)
1076 break;
1077 if (ret > 0 || strcmp(log.refname, u->refname)) {
1078 ret = 0;
1079 break;
1080 }
1081
1082 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1083 tombstone = &logs[logs_nr++];
1084 tombstone->refname = xstrdup(u->refname);
1085 tombstone->value_type = REFTABLE_LOG_DELETION;
1086 tombstone->update_index = log.update_index;
1087 }
1088
1089 reftable_log_record_release(&log);
1090 reftable_iterator_destroy(&it);
1091
1092 if (ret)
1093 goto done;
1094 } else if (u->flags & REF_HAVE_NEW &&
1095 (u->flags & REF_FORCE_CREATE_REFLOG ||
1096 should_write_log(&arg->refs->base, u->refname))) {
1097 struct reftable_log_record *log;
1098
1099 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1100 log = &logs[logs_nr++];
1101 memset(log, 0, sizeof(*log));
1102
1103 fill_reftable_log_record(log);
1104 log->update_index = ts;
1105 log->refname = xstrdup(u->refname);
1106 log->value.update.new_hash = u->new_oid.hash;
1107 log->value.update.old_hash = tx_update->current_oid.hash;
1108 log->value.update.message =
1109 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1110 }
1111
1112 if (u->flags & REF_LOG_ONLY)
1113 continue;
1114
1115 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1116 struct reftable_ref_record ref = {
1117 .refname = (char *)u->refname,
1118 .update_index = ts,
1119 .value_type = REFTABLE_REF_DELETION,
1120 };
1121
1122 ret = reftable_writer_add_ref(writer, &ref);
1123 if (ret < 0)
1124 goto done;
1125 } else if (u->flags & REF_HAVE_NEW) {
1126 struct reftable_ref_record ref = {0};
1127 struct object_id peeled;
1128 int peel_error;
1129
1130 ref.refname = (char *)u->refname;
1131 ref.update_index = ts;
1132
1133 peel_error = peel_object(&u->new_oid, &peeled);
1134 if (!peel_error) {
1135 ref.value_type = REFTABLE_REF_VAL2;
1136 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1137 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1138 } else if (!is_null_oid(&u->new_oid)) {
1139 ref.value_type = REFTABLE_REF_VAL1;
1140 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1141 }
1142
1143 ret = reftable_writer_add_ref(writer, &ref);
1144 if (ret < 0)
1145 goto done;
1146 }
1147 }
1148
1149 /*
1150 * Logs are written at the end so that we do not have intermixed ref
1151 * and log blocks.
1152 */
1153 if (logs) {
1154 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1155 if (ret < 0)
1156 goto done;
1157 }
1158
1159 done:
1160 assert(ret != REFTABLE_API_ERROR);
1161 for (i = 0; i < logs_nr; i++)
1162 clear_reftable_log_record(&logs[i]);
1163 free(logs);
1164 return ret;
1165 }
1166
1167 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1168 struct ref_transaction *transaction,
1169 struct strbuf *err)
1170 {
1171 struct reftable_transaction_data *tx_data = transaction->backend_data;
1172 int ret = 0;
1173
1174 for (size_t i = 0; i < tx_data->args_nr; i++) {
1175 ret = reftable_addition_add(tx_data->args[i].addition,
1176 write_transaction_table, &tx_data->args[i]);
1177 if (ret < 0)
1178 goto done;
1179
1180 ret = reftable_addition_commit(tx_data->args[i].addition);
1181 if (ret < 0)
1182 goto done;
1183 }
1184
1185 done:
1186 assert(ret != REFTABLE_API_ERROR);
1187 free_transaction_data(tx_data);
1188 transaction->state = REF_TRANSACTION_CLOSED;
1189
1190 if (ret) {
1191 strbuf_addf(err, _("reftable: transaction failure: %s"),
1192 reftable_error_str(ret));
1193 return -1;
1194 }
1195 return ret;
1196 }
1197
1198 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1199 struct ref_transaction *transaction,
1200 struct strbuf *err)
1201 {
1202 return ref_transaction_commit(transaction, err);
1203 }
1204
1205 static int reftable_be_pack_refs(struct ref_store *ref_store,
1206 struct pack_refs_opts *opts)
1207 {
1208 struct reftable_ref_store *refs =
1209 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1210 struct reftable_stack *stack;
1211 int ret;
1212
1213 if (refs->err)
1214 return refs->err;
1215
1216 stack = refs->worktree_stack;
1217 if (!stack)
1218 stack = refs->main_stack;
1219
1220 ret = reftable_stack_compact_all(stack, NULL);
1221 if (ret)
1222 goto out;
1223 ret = reftable_stack_clean(stack);
1224 if (ret)
1225 goto out;
1226
1227 out:
1228 return ret;
1229 }
1230
1231 struct write_create_symref_arg {
1232 struct reftable_ref_store *refs;
1233 struct reftable_stack *stack;
1234 const char *refname;
1235 const char *target;
1236 const char *logmsg;
1237 };
1238
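/*
 * Write a table containing the new symref and, when the target resolves and
 * logging is enabled, a reflog entry documenting its creation.
 */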
1239 static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1240 {
1241 struct write_create_symref_arg *create = cb_data;
1242 uint64_t ts = reftable_stack_next_update_index(create->stack);
1243 struct reftable_ref_record ref = {
1244 .refname = (char *)create->refname,
1245 .value_type = REFTABLE_REF_SYMREF,
1246 .value.symref = (char *)create->target,
1247 .update_index = ts,
1248 };
1249 struct reftable_log_record log = {0};
1250 struct object_id new_oid;
1251 struct object_id old_oid;
1252 int ret;
1253
1254 reftable_writer_set_limits(writer, ts, ts);
1255
1256 ret = reftable_writer_add_ref(writer, &ref);
1257 if (ret)
1258 return ret;
1259
1260 /*
1261 * Note that it is important to try and resolve the reference before we
1262 * write the log entry. This is because `should_write_log()` will munge
1263 * `core.logAllRefUpdates`, which is undesirable when we create a new
1264 * repository because it would be written into the config. As HEAD will
1265 * not resolve for new repositories this ordering will ensure that this
1266 * never happens.
1267 */
1268 if (!create->logmsg ||
1269 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1270 RESOLVE_REF_READING, &new_oid, NULL) ||
1271 !should_write_log(&create->refs->base, create->refname))
1272 return 0;
1273
1274 fill_reftable_log_record(&log);
1275 log.refname = xstrdup(create->refname);
1276 log.update_index = ts;
1277 log.value.update.message = xstrndup(create->logmsg,
1278 create->refs->write_options.block_size / 2);
1279 log.value.update.new_hash = new_oid.hash;
1280 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1281 RESOLVE_REF_READING, &old_oid, NULL))
1282 log.value.update.old_hash = old_oid.hash;
1283
1284 ret = reftable_writer_add_log(writer, &log);
1285 clear_reftable_log_record(&log);
1286 return ret;
1287 }
1288
1289 static int reftable_be_create_symref(struct ref_store *ref_store,
1290 const char *refname,
1291 const char *target,
1292 const char *logmsg)
1293 {
1294 struct reftable_ref_store *refs =
1295 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1296 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1297 struct write_create_symref_arg arg = {
1298 .refs = refs,
1299 .stack = stack,
1300 .refname = refname,
1301 .target = target,
1302 .logmsg = logmsg,
1303 };
1304 int ret;
1305
1306 ret = refs->err;
1307 if (ret < 0)
1308 goto done;
1309
1310 ret = reftable_stack_reload(stack);
1311 if (ret)
1312 goto done;
1313
1314 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1315
1316 done:
1317 assert(ret != REFTABLE_API_ERROR);
1318 if (ret)
1319 error("unable to write symref for %s: %s", refname,
1320 reftable_error_str(ret));
1321 return ret;
1322 }
1323
1324 struct write_copy_arg {
1325 struct reftable_ref_store *refs;
1326 struct reftable_stack *stack;
1327 const char *oldname;
1328 const char *newname;
1329 const char *logmsg;
1330 int delete_old;
1331 };
1332
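/*
 * Write a table that copies the old reference together with its reflog to
 * the new name. With `delete_old` set, the old reference and its reflog
 * entries are deleted as well, turning the copy into a rename.
 */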
1333 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1334 {
1335 struct write_copy_arg *arg = cb_data;
1336 uint64_t deletion_ts, creation_ts;
1337 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1338 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1339 struct reftable_log_record old_log = {0}, *logs = NULL;
1340 struct reftable_iterator it = {0};
1341 struct string_list skip = STRING_LIST_INIT_NODUP;
1342 struct strbuf errbuf = STRBUF_INIT;
1343 size_t logs_nr = 0, logs_alloc = 0, i;
1344 int ret;
1345
1346 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1347 ret = error(_("refname %s not found"), arg->oldname);
1348 goto done;
1349 }
1350 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1351 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1352 arg->oldname);
1353 goto done;
1354 }
1355
1356 /*
1357 * There's nothing to do in case the old and new name are the same, so
1358 * we exit early in that case.
1359 */
1360 if (!strcmp(arg->oldname, arg->newname)) {
1361 ret = 0;
1362 goto done;
1363 }
1364
1365 /*
1366 * Verify that the new refname is available.
1367 */
1368 string_list_insert(&skip, arg->oldname);
1369 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1370 NULL, &skip, &errbuf);
1371 if (ret < 0) {
1372 error("%s", errbuf.buf);
1373 goto done;
1374 }
1375
1376 /*
1377 * When deleting the old reference we have to use two update indices:
1378 * one to delete the old ref and its reflog, and one to create the
1379 * new ref and its reflog. They need to be staged with two separate
1380 * indices because the new reflog needs to encode both the deletion of
1381 * the old branch and the creation of the new branch, and we cannot do
1382 * two changes to a reflog in a single update.
1383 */
1384 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1385 if (arg->delete_old)
1386 creation_ts++;
1387 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1388
1389 /*
1390 * Add the new reference. If this is a rename then we also delete the
1391 * old reference.
1392 */
1393 refs[0] = old_ref;
1394 refs[0].refname = (char *)arg->newname;
1395 refs[0].update_index = creation_ts;
1396 if (arg->delete_old) {
1397 refs[1].refname = (char *)arg->oldname;
1398 refs[1].value_type = REFTABLE_REF_DELETION;
1399 refs[1].update_index = deletion_ts;
1400 }
1401 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1402 if (ret < 0)
1403 goto done;
1404
1405 /*
1406 * When deleting the old branch we need to create a reflog entry on the
1407 * new branch name that indicates that the old branch has been deleted
1408 * and then recreated. This is a tad weird, but matches what the files
1409 * backend does.
1410 */
1411 if (arg->delete_old) {
1412 struct strbuf head_referent = STRBUF_INIT;
1413 struct object_id head_oid;
1414 int append_head_reflog;
1415 unsigned head_type = 0;
1416
1417 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1418 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1419 fill_reftable_log_record(&logs[logs_nr]);
1420 logs[logs_nr].refname = (char *)arg->newname;
1421 logs[logs_nr].update_index = deletion_ts;
1422 logs[logs_nr].value.update.message =
1423 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1424 logs[logs_nr].value.update.old_hash = old_ref.value.val1;
1425 logs_nr++;
1426
1427 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1428 if (ret < 0)
1429 goto done;
1430 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1431 strbuf_release(&head_referent);
1432
1433 /*
1434 * The files backend uses `refs_delete_ref()` to delete the old
1435 * branch name, which will append a reflog entry for HEAD in
1436 * case it points to the old branch.
1437 */
1438 if (append_head_reflog) {
1439 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1440 logs[logs_nr] = logs[logs_nr - 1];
1441 logs[logs_nr].refname = "HEAD";
1442 logs_nr++;
1443 }
1444 }
1445
1446 /*
1447 * Create the reflog entry for the newly created branch.
1448 */
1449 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1450 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1451 fill_reftable_log_record(&logs[logs_nr]);
1452 logs[logs_nr].refname = (char *)arg->newname;
1453 logs[logs_nr].update_index = creation_ts;
1454 logs[logs_nr].value.update.message =
1455 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1456 logs[logs_nr].value.update.new_hash = old_ref.value.val1;
1457 logs_nr++;
1458
1459 /*
1460 * In addition to writing the reflog entry for the new branch, we also
1461 * copy over all log entries from the old reflog. Last but not least,
1462 * when renaming we also have to delete all the old reflog entries.
1463 */
1464 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1465 if (ret < 0)
1466 goto done;
1467
1468 while (1) {
1469 ret = reftable_iterator_next_log(&it, &old_log);
1470 if (ret < 0)
1471 goto done;
1472 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1473 ret = 0;
1474 break;
1475 }
1476
1477 free(old_log.refname);
1478
1479 /*
1480 * Copy over the old reflog entry with the new refname.
1481 */
1482 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1483 logs[logs_nr] = old_log;
1484 logs[logs_nr].refname = (char *)arg->newname;
1485 logs_nr++;
1486
1487 /*
1488 * Delete the old reflog entry in case we are renaming.
1489 */
1490 if (arg->delete_old) {
1491 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1492 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1493 logs[logs_nr].refname = (char *)arg->oldname;
1494 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1495 logs[logs_nr].update_index = old_log.update_index;
1496 logs_nr++;
1497 }
1498
1499 /*
1500 * Transfer ownership of the log record we're iterating over to
1501 * the array of log records. Otherwise, the pointers would get
1502 * free'd or reallocated by the iterator.
1503 */
1504 memset(&old_log, 0, sizeof(old_log));
1505 }
1506
1507 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1508 if (ret < 0)
1509 goto done;
1510
1511 done:
1512 assert(ret != REFTABLE_API_ERROR);
1513 reftable_iterator_destroy(&it);
1514 string_list_clear(&skip, 0);
1515 strbuf_release(&errbuf);
1516 for (i = 0; i < logs_nr; i++) {
1517 if (!strcmp(logs[i].refname, "HEAD"))
1518 continue;
1519 if (logs[i].value.update.old_hash == old_ref.value.val1)
1520 logs[i].value.update.old_hash = NULL;
1521 if (logs[i].value.update.new_hash == old_ref.value.val1)
1522 logs[i].value.update.new_hash = NULL;
1523 logs[i].refname = NULL;
1524 reftable_log_record_release(&logs[i]);
1525 }
1526 free(logs);
1527 reftable_ref_record_release(&old_ref);
1528 reftable_log_record_release(&old_log);
1529 return ret;
1530 }
1531
1532 static int reftable_be_rename_ref(struct ref_store *ref_store,
1533 const char *oldrefname,
1534 const char *newrefname,
1535 const char *logmsg)
1536 {
1537 struct reftable_ref_store *refs =
1538 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1539 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1540 struct write_copy_arg arg = {
1541 .refs = refs,
1542 .stack = stack,
1543 .oldname = oldrefname,
1544 .newname = newrefname,
1545 .logmsg = logmsg,
1546 .delete_old = 1,
1547 };
1548 int ret;
1549
1550 ret = refs->err;
1551 if (ret < 0)
1552 goto done;
1553
1554 ret = reftable_stack_reload(stack);
1555 if (ret)
1556 goto done;
1557 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1558
1559 done:
1560 assert(ret != REFTABLE_API_ERROR);
1561 return ret;
1562 }
1563
1564 static int reftable_be_copy_ref(struct ref_store *ref_store,
1565 const char *oldrefname,
1566 const char *newrefname,
1567 const char *logmsg)
1568 {
1569 struct reftable_ref_store *refs =
1570 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1571 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1572 struct write_copy_arg arg = {
1573 .refs = refs,
1574 .stack = stack,
1575 .oldname = oldrefname,
1576 .newname = newrefname,
1577 .logmsg = logmsg,
1578 };
1579 int ret;
1580
1581 ret = refs->err;
1582 if (ret < 0)
1583 goto done;
1584
1585 ret = reftable_stack_reload(stack);
1586 if (ret)
1587 goto done;
1588 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1589
1590 done:
1591 assert(ret != REFTABLE_API_ERROR);
1592 return ret;
1593 }
1594
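/*
 * Iterator yielding the name of each reference that has at least one reflog
 * entry in the given stack.
 */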
1595 struct reftable_reflog_iterator {
1596 struct ref_iterator base;
1597 struct reftable_ref_store *refs;
1598 struct reftable_iterator iter;
1599 struct reftable_log_record log;
1600 char *last_name;
1601 int err;
1602 };
1603
1604 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1605 {
1606 struct reftable_reflog_iterator *iter =
1607 (struct reftable_reflog_iterator *)ref_iterator;
1608
1609 while (!iter->err) {
1610 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1611 if (iter->err)
1612 break;
1613
1614 /*
1615 * We want the refnames that we have reflogs for, so we skip if
1616 * we've already produced this name. This could be faster by
1617 * seeking directly to reflog@update_index==0.
1618 */
1619 if (iter->last_name && !strcmp(iter->log.refname, iter->last_name))
1620 continue;
1621
1622 if (check_refname_format(iter->log.refname,
1623 REFNAME_ALLOW_ONELEVEL))
1624 continue;
1625
1626 free(iter->last_name);
1627 iter->last_name = xstrdup(iter->log.refname);
1628 iter->base.refname = iter->log.refname;
1629
1630 break;
1631 }
1632
1633 if (iter->err > 0) {
1634 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1635 return ITER_ERROR;
1636 return ITER_DONE;
1637 }
1638
1639 if (iter->err < 0) {
1640 ref_iterator_abort(ref_iterator);
1641 return ITER_ERROR;
1642 }
1643
1644 return ITER_OK;
1645 }
1646
1647 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1648 struct object_id *peeled)
1649 {
1650 BUG("reftable reflog iterator cannot be peeled");
1651 return -1;
1652 }
1653
1654 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1655 {
1656 struct reftable_reflog_iterator *iter =
1657 (struct reftable_reflog_iterator *)ref_iterator;
1658 reftable_log_record_release(&iter->log);
1659 reftable_iterator_destroy(&iter->iter);
1660 free(iter->last_name);
1661 free(iter);
1662 return ITER_DONE;
1663 }
1664
1665 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1666 .advance = reftable_reflog_iterator_advance,
1667 .peel = reftable_reflog_iterator_peel,
1668 .abort = reftable_reflog_iterator_abort
1669 };
1670
1671 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1672 struct reftable_stack *stack)
1673 {
1674 struct reftable_merged_table *merged_table;
1675 struct reftable_reflog_iterator *iter;
1676 int ret;
1677
1678 iter = xcalloc(1, sizeof(*iter));
1679 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
1680 iter->refs = refs;
1681
1682 ret = refs->err;
1683 if (ret)
1684 goto done;
1685
1686 ret = reftable_stack_reload(refs->main_stack);
1687 if (ret < 0)
1688 goto done;
1689
1690 merged_table = reftable_stack_merged_table(stack);
1691
1692 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1693 if (ret < 0)
1694 goto done;
1695
1696 done:
1697 iter->err = ret;
1698 return iter;
1699 }
1700
1701 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1702 {
1703 struct reftable_ref_store *refs =
1704 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1705 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1706
1707 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1708 if (!refs->worktree_stack)
1709 return &main_iter->base;
1710
1711 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1712
1713 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
1714 ref_iterator_select, NULL);
1715 }
1716
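/*
 * Hand a single log record to the given `each_reflog_ent_fn` callback,
 * converting hashes and the committer identity into the expected formats.
 * Reflog existence markers are not passed to the callback.
 */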
1717 static int yield_log_record(struct reftable_log_record *log,
1718 each_reflog_ent_fn fn,
1719 void *cb_data)
1720 {
1721 struct object_id old_oid, new_oid;
1722 const char *full_committer;
1723
1724 oidread(&old_oid, log->value.update.old_hash);
1725 oidread(&new_oid, log->value.update.new_hash);
1726
1727 /*
1728 * When both the old object ID and the new object ID are null
1729 * then this is the reflog existence marker, which must not be
1730 * exposed to the caller.
1731 */
1732 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1733 return 0;
1734
1735 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1736 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1737 return fn(&old_oid, &new_oid, full_committer,
1738 log->value.update.time, log->value.update.tz_offset,
1739 log->value.update.message, cb_data);
1740 }
1741
1742 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1743 const char *refname,
1744 each_reflog_ent_fn fn,
1745 void *cb_data)
1746 {
1747 struct reftable_ref_store *refs =
1748 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1749 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1750 struct reftable_merged_table *mt = NULL;
1751 struct reftable_log_record log = {0};
1752 struct reftable_iterator it = {0};
1753 int ret;
1754
1755 if (refs->err < 0)
1756 return refs->err;
1757
1758 mt = reftable_stack_merged_table(stack);
1759 ret = reftable_merged_table_seek_log(mt, &it, refname);
1760 while (!ret) {
1761 ret = reftable_iterator_next_log(&it, &log);
1762 if (ret < 0)
1763 break;
1764 if (ret > 0 || strcmp(log.refname, refname)) {
1765 ret = 0;
1766 break;
1767 }
1768
1769 ret = yield_log_record(&log, fn, cb_data);
1770 if (ret)
1771 break;
1772 }
1773
1774 reftable_log_record_release(&log);
1775 reftable_iterator_destroy(&it);
1776 return ret;
1777 }
1778
1779 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1780 const char *refname,
1781 each_reflog_ent_fn fn,
1782 void *cb_data)
1783 {
1784 struct reftable_ref_store *refs =
1785 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1786 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1787 struct reftable_merged_table *mt = NULL;
1788 struct reftable_log_record *logs = NULL;
1789 struct reftable_iterator it = {0};
1790 size_t logs_alloc = 0, logs_nr = 0, i;
1791 int ret;
1792
1793 if (refs->err < 0)
1794 return refs->err;
1795
1796 mt = reftable_stack_merged_table(stack);
1797 ret = reftable_merged_table_seek_log(mt, &it, refname);
1798 while (!ret) {
1799 struct reftable_log_record log = {0};
1800
1801 ret = reftable_iterator_next_log(&it, &log);
1802 if (ret < 0)
1803 goto done;
1804 if (ret > 0 || strcmp(log.refname, refname)) {
1805 reftable_log_record_release(&log);
1806 ret = 0;
1807 break;
1808 }
1809
1810 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1811 logs[logs_nr++] = log;
1812 }
1813
1814 for (i = logs_nr; i--;) {
1815 ret = yield_log_record(&logs[i], fn, cb_data);
1816 if (ret)
1817 goto done;
1818 }
1819
1820 done:
1821 reftable_iterator_destroy(&it);
1822 for (i = 0; i < logs_nr; i++)
1823 reftable_log_record_release(&logs[i]);
1824 free(logs);
1825 return ret;
1826 }
1827
1828 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1829 const char *refname)
1830 {
1831 struct reftable_ref_store *refs =
1832 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1833 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1834 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
1835 struct reftable_log_record log = {0};
1836 struct reftable_iterator it = {0};
1837 int ret;
1838
1839 ret = refs->err;
1840 if (ret < 0)
1841 goto done;
1842
1843 ret = reftable_stack_reload(stack);
1844 if (ret < 0)
1845 goto done;
1846
1847 ret = reftable_merged_table_seek_log(mt, &it, refname);
1848 if (ret < 0)
1849 goto done;
1850
1851 /*
1852 * Check whether we get at least one log record for the given ref name.
1853 * If so, the reflog exists, otherwise it doesn't.
1854 */
1855 ret = reftable_iterator_next_log(&it, &log);
1856 if (ret < 0)
1857 goto done;
1858 if (ret > 0) {
1859 ret = 0;
1860 goto done;
1861 }
1862
1863 ret = strcmp(log.refname, refname) == 0;
1864
1865 done:
1866 reftable_iterator_destroy(&it);
1867 reftable_log_record_release(&log);
1868 if (ret < 0)
1869 ret = 0;
1870 return ret;
1871 }
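
/*
 * Illustrative sketch only: a hypothetical caller checks for a reflog via
 * the generic refs API, which routes to reftable_be_reflog_exists() above.
 * This assumes the refs_reflog_exists() entry point from refs.h.
 */
#if 0
static int have_main_reflog(struct ref_store *store)
{
	/* Returns 1 if the reflog exists, 0 otherwise (errors map to 0). */
	return refs_reflog_exists(store, "refs/heads/main");
}
#endif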
1872
1873 struct write_reflog_existence_arg {
1874 struct reftable_ref_store *refs;
1875 const char *refname;
1876 struct reftable_stack *stack;
1877 };
1878
1879 static int write_reflog_existence_table(struct reftable_writer *writer,
1880 void *cb_data)
1881 {
1882 struct write_reflog_existence_arg *arg = cb_data;
1883 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1884 struct reftable_log_record log = {0};
1885 int ret;
1886
1887 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1888 if (ret <= 0)
1889 goto done;
1890
1891 reftable_writer_set_limits(writer, ts, ts);
1892
1893 /*
1894 * The existence entry has both old and new object ID set to the
1895 * null object ID. Our iterators are aware of this and will not present
1896 * them to their callers.
1897 */
1898 log.refname = xstrdup(arg->refname);
1899 log.update_index = ts;
1900 log.value_type = REFTABLE_LOG_UPDATE;
1901 ret = reftable_writer_add_log(writer, &log);
1902
1903 done:
1904 assert(ret != REFTABLE_API_ERROR);
1905 reftable_log_record_release(&log);
1906 return ret;
1907 }
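
/*
 * Illustrative sketch only, mirroring the check in yield_log_record(): the
 * existence entry written above leaves value.update zero-initialized, so
 * both object IDs read back as the null OID. That is how readers can tell
 * a creation marker apart from a real reflog entry. The helper name is
 * hypothetical.
 */
#if 0
static int is_reflog_existence_marker(struct reftable_log_record *log)
{
	struct object_id old_oid, new_oid;

	oidread(&old_oid, log->value.update.old_hash);
	oidread(&new_oid, log->value.update.new_hash);

	return is_null_oid(&old_oid) && is_null_oid(&new_oid);
}
#endif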
1908
1909 static int reftable_be_create_reflog(struct ref_store *ref_store,
1910 const char *refname,
1911 struct strbuf *errmsg)
1912 {
1913 struct reftable_ref_store *refs =
1914 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1915 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1916 struct write_reflog_existence_arg arg = {
1917 .refs = refs,
1918 .stack = stack,
1919 .refname = refname,
1920 };
1921 int ret;
1922
1923 ret = refs->err;
1924 if (ret < 0)
1925 goto done;
1926
1927 ret = reftable_stack_reload(stack);
1928 if (ret)
1929 goto done;
1930
1931 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1932
1933 done:
1934 return ret;
1935 }
1936
1937 struct write_reflog_delete_arg {
1938 struct reftable_stack *stack;
1939 const char *refname;
1940 };
1941
1942 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1943 {
1944 struct write_reflog_delete_arg *arg = cb_data;
1945 struct reftable_merged_table *mt =
1946 reftable_stack_merged_table(arg->stack);
1947 struct reftable_log_record log = {0}, tombstone = {0};
1948 struct reftable_iterator it = {0};
1949 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1950 int ret;
1951
1952 reftable_writer_set_limits(writer, ts, ts);
1953
1954 /*
1955 * In order to delete a reflog we need to delete all of its entries one
1956 * by one. This is inefficient, but the reftable format does not have a
1957 * better marker right now.
1958 */
1959 ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
1960 while (ret == 0) {
1961 ret = reftable_iterator_next_log(&it, &log);
1962 if (ret < 0)
1963 break;
1964 if (ret > 0 || strcmp(log.refname, arg->refname)) {
1965 ret = 0;
1966 break;
1967 }
1968
1969 tombstone.refname = (char *)arg->refname;
1970 tombstone.value_type = REFTABLE_LOG_DELETION;
1971 tombstone.update_index = log.update_index;
1972
1973 ret = reftable_writer_add_log(writer, &tombstone);
1974 }
1975
1976 reftable_log_record_release(&log);
1977 reftable_iterator_destroy(&it);
1978 return ret;
1979 }
1980
1981 static int reftable_be_delete_reflog(struct ref_store *ref_store,
1982 const char *refname)
1983 {
1984 struct reftable_ref_store *refs =
1985 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1986 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1987 struct write_reflog_delete_arg arg = {
1988 .stack = stack,
1989 .refname = refname,
1990 };
1991 int ret;
1992
1993 ret = reftable_stack_reload(stack);
1994 if (ret)
1995 return ret;
1996 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
1997
1998 assert(ret != REFTABLE_API_ERROR);
1999 return ret;
2000 }
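
/*
 * Illustrative sketch only: a hypothetical caller deletes a reflog through
 * the generic refs API, which the vtable at the bottom of this file routes
 * to reftable_be_delete_reflog(). This assumes the refs_delete_reflog()
 * entry point from refs.h. The tombstones written by
 * write_reflog_delete_table() shadow the existing entries until the stack
 * is compacted, at which point they are dropped for good.
 */
#if 0
static void drop_topic_reflog(struct ref_store *store)
{
	if (refs_delete_reflog(store, "refs/heads/topic"))
		warning("could not delete reflog for refs/heads/topic");
}
#endif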
2001
2002 struct reflog_expiry_arg {
2003 struct reftable_stack *stack;
2004 struct reftable_log_record *records;
2005 struct object_id update_oid;
2006 const char *refname;
2007 size_t len;
2008 };
2009
2010 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2011 {
2012 struct reflog_expiry_arg *arg = cb_data;
2013 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2014 uint64_t live_records = 0;
2015 size_t i;
2016 int ret;
2017
2018 for (i = 0; i < arg->len; i++)
2019 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2020 live_records++;
2021
2022 reftable_writer_set_limits(writer, ts, ts);
2023
2024 if (!is_null_oid(&arg->update_oid)) {
2025 struct reftable_ref_record ref = {0};
2026 struct object_id peeled;
2027
2028 ref.refname = (char *)arg->refname;
2029 ref.update_index = ts;
2030
2031 if (!peel_object(&arg->update_oid, &peeled)) {
2032 ref.value_type = REFTABLE_REF_VAL2;
2033 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2034 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2035 } else {
2036 ref.value_type = REFTABLE_REF_VAL1;
2037 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2038 }
2039
2040 ret = reftable_writer_add_ref(writer, &ref);
2041 if (ret < 0)
2042 return ret;
2043 }
2044
2045 /*
2046 * When no entries remain in the reflog we would otherwise delete it
2047 * entirely. Instead, write a placeholder reflog entry that indicates
2048 * that the reflog still exists.
2049 */
2050 if (!live_records) {
2051 struct reftable_log_record log = {
2052 .refname = (char *)arg->refname,
2053 .value_type = REFTABLE_LOG_UPDATE,
2054 .update_index = ts,
2055 };
2056
2057 ret = reftable_writer_add_log(writer, &log);
2058 if (ret)
2059 return ret;
2060 }
2061
2062 for (i = 0; i < arg->len; i++) {
2063 ret = reftable_writer_add_log(writer, &arg->records[i]);
2064 if (ret)
2065 return ret;
2066 }
2067
2068 return 0;
2069 }
2070
2071 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2072 const char *refname,
2073 unsigned int flags,
2074 reflog_expiry_prepare_fn prepare_fn,
2075 reflog_expiry_should_prune_fn should_prune_fn,
2076 reflog_expiry_cleanup_fn cleanup_fn,
2077 void *policy_cb_data)
2078 {
2079 /*
2080 * For log expiry, we write tombstones for every single reflog entry
2081 * that is to be expired. This means that the entries are still
2082 * retrievable by delving into the stack, and expiring entries
2083 * paradoxically takes extra memory. This memory is only reclaimed when
2084 * compacting the reftable stack.
2085 *
2086 * It would be better if the refs backend supported an API that sets a
2087 * criterion for all refs, passing the criterion to pack_refs().
2088 *
2089 * On the plus side, because we do the expiration per ref, we can easily
2090 * insert the reflog existence dummies.
2091 */
2092 struct reftable_ref_store *refs =
2093 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2094 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2095 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
2096 struct reftable_log_record *logs = NULL;
2097 struct reftable_log_record *rewritten = NULL;
2098 struct reftable_ref_record ref_record = {0};
2099 struct reftable_iterator it = {0};
2100 struct reftable_addition *add = NULL;
2101 struct reflog_expiry_arg arg = {0};
2102 struct object_id oid = {0};
2103 uint8_t *last_hash = NULL;
2104 size_t logs_nr = 0, logs_alloc = 0, i;
2105 int ret;
2106
2107 if (refs->err < 0)
2108 return refs->err;
2109
2110 ret = reftable_stack_reload(stack);
2111 if (ret < 0)
2112 goto done;
2113
2114 ret = reftable_merged_table_seek_log(mt, &it, refname);
2115 if (ret < 0)
2116 goto done;
2117
2118 ret = reftable_stack_new_addition(&add, stack);
2119 if (ret < 0)
2120 goto done;
2121
2122 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2123 if (ret < 0)
2124 goto done;
2125 if (reftable_ref_record_val1(&ref_record))
2126 oidread(&oid, reftable_ref_record_val1(&ref_record));
2127 prepare_fn(refname, &oid, policy_cb_data);
2128
2129 while (1) {
2130 struct reftable_log_record log = {0};
2131 struct object_id old_oid, new_oid;
2132
2133 ret = reftable_iterator_next_log(&it, &log);
2134 if (ret < 0)
2135 goto done;
2136 if (ret > 0 || strcmp(log.refname, refname)) {
2137 reftable_log_record_release(&log);
2138 break;
2139 }
2140
2141 oidread(&old_oid, log.value.update.old_hash);
2142 oidread(&new_oid, log.value.update.new_hash);
2143
2144 /*
2145 * Skip over the reflog existence marker. We will add it back
2146 * in when there are no live reflog records.
2147 */
2148 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2149 reftable_log_record_release(&log);
2150 continue;
2151 }
2152
2153 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2154 logs[logs_nr++] = log;
2155 }
2156
2157 /*
2158 * We need to rewrite all reflog entries according to the pruning
2159 * callback function:
2160 *
2161 * - If a reflog entry shall be pruned we mark the record for
2162 * deletion.
2163 *
2164 * - Otherwise we may have to rewrite the chain of reflog entries so
2165 * that gaps created by just-deleted records get backfilled.
2166 */
2167 CALLOC_ARRAY(rewritten, logs_nr);
2168 for (i = logs_nr; i--;) {
2169 struct reftable_log_record *dest = &rewritten[i];
2170 struct object_id old_oid, new_oid;
2171
2172 *dest = logs[i];
2173 oidread(&old_oid, logs[i].value.update.old_hash);
2174 oidread(&new_oid, logs[i].value.update.new_hash);
2175
2176 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2177 (timestamp_t)logs[i].value.update.time,
2178 logs[i].value.update.tz_offset,
2179 logs[i].value.update.message,
2180 policy_cb_data)) {
2181 dest->value_type = REFTABLE_LOG_DELETION;
2182 } else {
2183 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2184 dest->value.update.old_hash = last_hash;
2185 last_hash = logs[i].value.update.new_hash;
2186 }
2187 }
2188
2189 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2190 reftable_ref_record_val1(&ref_record))
2191 oidread(&arg.update_oid, last_hash);
2192
2193 arg.records = rewritten;
2194 arg.len = logs_nr;
2195 	arg.stack = stack;
2196 	arg.refname = refname;
2197
2198 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2199 if (ret < 0)
2200 goto done;
2201
2202 /*
2203 * Future improvement: we could skip writing records that were
2204 * not changed.
2205 */
2206 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2207 ret = reftable_addition_commit(add);
2208
2209 done:
2210 if (add)
2211 cleanup_fn(policy_cb_data);
2212 assert(ret != REFTABLE_API_ERROR);
2213
2214 reftable_ref_record_release(&ref_record);
2215 reftable_iterator_destroy(&it);
2216 reftable_addition_destroy(add);
2217 for (i = 0; i < logs_nr; i++)
2218 reftable_log_record_release(&logs[i]);
2219 free(logs);
2220 free(rewritten);
2221 return ret;
2222 }
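
/*
 * Illustrative sketch only: a hypothetical pruning policy of the kind that
 * reftable_be_reflog_expire() drives through should_prune_fn. The struct
 * and function names are made up; the callback signature follows the
 * reflog_expiry_should_prune_fn typedef from refs.h. Entries older than
 * the cutoff are marked REFTABLE_LOG_DELETION by the rewrite loop above.
 */
#if 0
struct cutoff_policy {
	timestamp_t expire_before;
};

static int prune_older_than_cutoff(struct object_id *old_oid,
				   struct object_id *new_oid,
				   const char *email, timestamp_t timestamp,
				   int tz, const char *message, void *cb_data)
{
	struct cutoff_policy *policy = cb_data;
	return timestamp < policy->expire_before;
}
#endif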
2223
2224 struct ref_storage_be refs_be_reftable = {
2225 .name = "reftable",
2226 .init = reftable_be_init,
2227 .init_db = reftable_be_init_db,
2228 .transaction_prepare = reftable_be_transaction_prepare,
2229 .transaction_finish = reftable_be_transaction_finish,
2230 .transaction_abort = reftable_be_transaction_abort,
2231 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2232
2233 .pack_refs = reftable_be_pack_refs,
2234 .create_symref = reftable_be_create_symref,
2235 .rename_ref = reftable_be_rename_ref,
2236 .copy_ref = reftable_be_copy_ref,
2237
2238 .iterator_begin = reftable_be_iterator_begin,
2239 .read_raw_ref = reftable_be_read_raw_ref,
2240 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2241
2242 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2243 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2244 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2245 .reflog_exists = reftable_be_reflog_exists,
2246 .create_reflog = reftable_be_create_reflog,
2247 .delete_reflog = reftable_be_delete_reflog,
2248 .reflog_expire = reftable_be_reflog_expire,
2249 };