refs/reftable-backend.c
1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../iterator.h"
9 #include "../ident.h"
10 #include "../lockfile.h"
11 #include "../object.h"
12 #include "../path.h"
13 #include "../refs.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
22
23 /*
24 * Used as a flag in ref_update::flags when the ref_update was via an
25 * update to HEAD.
26 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
28
29 struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51 };
52
53 /*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62 {
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76 }
77
78 /*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
90 */
91 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94 {
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when the worktree references the
118 * current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153 }
154
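/*
 * Editor's sketch (hypothetical caller, not part of the upstream file):
 * `stack_for()` both routes a lookup to the right reftable stack and, for
 * refs of other worktrees, rewrites the name to the form stored in that
 * stack. Kept inside `#if 0` so the file still compiles as-is.
 */
#if 0
const char *stored = NULL;
struct reftable_stack *s;

s = stack_for(store, "refs/heads/main", &stored);
/* s == store->main_stack, stored == "refs/heads/main" */

s = stack_for(store, "worktrees/wt1/refs/bisect/bad", &stored);
/* s is the lazily-created stack for "$commondir/worktrees/wt1/reftable",
 * stored == "refs/bisect/bad" */
#endif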
155 static int should_write_log(struct ref_store *refs, const char *refname)
156 {
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172 }
173
174 static void fill_reftable_log_record(struct reftable_log_record *log)
175 {
176 const char *info = git_committer_info(0);
177 struct ident_split split = {0};
178 int sign = 1;
179
180 if (split_ident_line(&split, info, strlen(info)))
181 BUG("failed splitting committer info");
182
183 reftable_log_record_release(log);
184 log->value_type = REFTABLE_LOG_UPDATE;
185 log->value.update.name =
186 xstrndup(split.name_begin, split.name_end - split.name_begin);
187 log->value.update.email =
188 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
189 log->value.update.time = atol(split.date_begin);
190 if (*split.tz_begin == '-') {
191 sign = -1;
192 split.tz_begin++;
193 }
194 if (*split.tz_begin == '+') {
195 sign = 1;
196 split.tz_begin++;
197 }
198
199 log->value.update.tz_offset = sign * atoi(split.tz_begin);
200 }
201
202 static int read_ref_without_reload(struct reftable_stack *stack,
203 const char *refname,
204 struct object_id *oid,
205 struct strbuf *referent,
206 unsigned int *type)
207 {
208 struct reftable_ref_record ref = {0};
209 int ret;
210
211 ret = reftable_stack_read_ref(stack, refname, &ref);
212 if (ret)
213 goto done;
214
215 if (ref.value_type == REFTABLE_REF_SYMREF) {
216 strbuf_reset(referent);
217 strbuf_addstr(referent, ref.value.symref);
218 *type |= REF_ISSYMREF;
219 } else if (reftable_ref_record_val1(&ref)) {
220 oidread(oid, reftable_ref_record_val1(&ref));
221 } else {
222 /* We got a tombstone, which should not happen. */
223 BUG("unhandled reference value type %d", ref.value_type);
224 }
225
226 done:
227 assert(ret != REFTABLE_API_ERROR);
228 reftable_ref_record_release(&ref);
229 return ret;
230 }
231
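/*
 * Editor's note with a usage sketch (assumed caller, not upstream code):
 * as the callers further down interpret it, read_ref_without_reload()
 * returns <0 on reftable errors, 0 when the ref was found, and >0 when it
 * does not exist. Kept inside `#if 0` so the file still compiles as-is.
 */
#if 0
struct object_id oid;
struct strbuf referent = STRBUF_INIT;
unsigned int type = 0;
int ret = read_ref_without_reload(stack, "refs/heads/main",
				  &oid, &referent, &type);
if (ret < 0)
	; /* reftable error */
else if (ret > 0)
	; /* reference does not exist */
else if (type & REF_ISSYMREF)
	; /* referent.buf holds the symref target */
else
	; /* oid holds the resolved object ID */
#endif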
232 static struct ref_store *reftable_be_init(struct repository *repo,
233 const char *gitdir,
234 unsigned int store_flags)
235 {
236 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
237 struct strbuf path = STRBUF_INIT;
238 int is_worktree;
239 mode_t mask;
240
241 mask = umask(0);
242 umask(mask);
243
244 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
245 strmap_init(&refs->worktree_stacks);
246 refs->store_flags = store_flags;
247 refs->write_options.block_size = 4096;
248 refs->write_options.hash_id = repo->hash_algo->format_id;
249 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
250
251 /*
252 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
253 * This stack contains both the shared and the main worktree refs.
254 *
255 * Note that we don't try to resolve the path in case we have a
256 * worktree because `get_common_dir_noenv()` already does it for us.
257 */
258 is_worktree = get_common_dir_noenv(&path, gitdir);
259 if (!is_worktree) {
260 strbuf_reset(&path);
261 strbuf_realpath(&path, gitdir, 0);
262 }
263 strbuf_addstr(&path, "/reftable");
264 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
265 refs->write_options);
266 if (refs->err)
267 goto done;
268
269 /*
270 * If we're in a worktree we also need to set up the worktree reftable
271 * stack that is contained in the per-worktree GIT_DIR.
272 *
273 * Ideally, we would also add the stack to our worktree stack map. But
274 * we have no way to figure out the worktree name here and thus can't
275 * do it efficiently.
276 */
277 if (is_worktree) {
278 strbuf_reset(&path);
279 strbuf_addf(&path, "%s/reftable", gitdir);
280
281 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285 }
286
287 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
288
289 done:
290 assert(refs->err != REFTABLE_API_ERROR);
291 strbuf_release(&path);
292 return &refs->base;
293 }
294
295 static int reftable_be_init_db(struct ref_store *ref_store,
296 int flags UNUSED,
297 struct strbuf *err UNUSED)
298 {
299 struct reftable_ref_store *refs =
300 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
301 struct strbuf sb = STRBUF_INIT;
302
303 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
304 safe_create_dir(sb.buf, 1);
305 strbuf_reset(&sb);
306
307 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
308 write_file(sb.buf, "ref: refs/heads/.invalid");
309 adjust_shared_perm(sb.buf);
310 strbuf_reset(&sb);
311
312 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
313 safe_create_dir(sb.buf, 1);
314 strbuf_reset(&sb);
315
316 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
317 write_file(sb.buf, "this repository uses the reftable format");
318 adjust_shared_perm(sb.buf);
319
320 strbuf_release(&sb);
321 return 0;
322 }
323
324 struct reftable_ref_iterator {
325 struct ref_iterator base;
326 struct reftable_ref_store *refs;
327 struct reftable_iterator iter;
328 struct reftable_ref_record ref;
329 struct object_id oid;
330
331 const char *prefix;
332 size_t prefix_len;
333 unsigned int flags;
334 int err;
335 };
336
337 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
338 {
339 struct reftable_ref_iterator *iter =
340 (struct reftable_ref_iterator *)ref_iterator;
341 struct reftable_ref_store *refs = iter->refs;
342
343 while (!iter->err) {
344 int flags = 0;
345
346 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
347 if (iter->err)
348 break;
349
350 /*
351 * The files backend only lists references contained in "refs/" unless
352 * the root refs are to be included. We emulate the same behaviour here.
353 */
354 if (!starts_with(iter->ref.refname, "refs/") &&
355 !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
356 (is_pseudoref(&iter->refs->base, iter->ref.refname) ||
357 is_headref(&iter->refs->base, iter->ref.refname)))) {
358 continue;
359 }
360
361 if (iter->prefix_len &&
362 strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
363 iter->err = 1;
364 break;
365 }
366
367 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
368 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
369 REF_WORKTREE_CURRENT)
370 continue;
371
372 switch (iter->ref.value_type) {
373 case REFTABLE_REF_VAL1:
374 oidread(&iter->oid, iter->ref.value.val1);
375 break;
376 case REFTABLE_REF_VAL2:
377 oidread(&iter->oid, iter->ref.value.val2.value);
378 break;
379 case REFTABLE_REF_SYMREF:
380 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
381 RESOLVE_REF_READING, &iter->oid, &flags))
382 oidclr(&iter->oid);
383 break;
384 default:
385 BUG("unhandled reference value type %d", iter->ref.value_type);
386 }
387
388 if (is_null_oid(&iter->oid))
389 flags |= REF_ISBROKEN;
390
391 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
392 if (!refname_is_safe(iter->ref.refname))
393 die(_("refname is dangerous: %s"), iter->ref.refname);
394 oidclr(&iter->oid);
395 flags |= REF_BAD_NAME | REF_ISBROKEN;
396 }
397
398 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
399 flags & REF_ISSYMREF &&
400 flags & REF_ISBROKEN)
401 continue;
402
403 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
404 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
405 &iter->oid, flags))
406 continue;
407
408 iter->base.refname = iter->ref.refname;
409 iter->base.oid = &iter->oid;
410 iter->base.flags = flags;
411
412 break;
413 }
414
415 if (iter->err > 0) {
416 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
417 return ITER_ERROR;
418 return ITER_DONE;
419 }
420
421 if (iter->err < 0) {
422 ref_iterator_abort(ref_iterator);
423 return ITER_ERROR;
424 }
425
426 return ITER_OK;
427 }
428
429 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
430 struct object_id *peeled)
431 {
432 struct reftable_ref_iterator *iter =
433 (struct reftable_ref_iterator *)ref_iterator;
434
435 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
436 oidread(peeled, iter->ref.value.val2.target_value);
437 return 0;
438 }
439
440 return -1;
441 }
442
443 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
444 {
445 struct reftable_ref_iterator *iter =
446 (struct reftable_ref_iterator *)ref_iterator;
447 reftable_ref_record_release(&iter->ref);
448 reftable_iterator_destroy(&iter->iter);
449 free(iter);
450 return ITER_DONE;
451 }
452
453 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
454 .advance = reftable_ref_iterator_advance,
455 .peel = reftable_ref_iterator_peel,
456 .abort = reftable_ref_iterator_abort
457 };
458
459 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
460 struct reftable_stack *stack,
461 const char *prefix,
462 int flags)
463 {
464 struct reftable_merged_table *merged_table;
465 struct reftable_ref_iterator *iter;
466 int ret;
467
468 iter = xcalloc(1, sizeof(*iter));
469 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
470 iter->prefix = prefix;
471 iter->prefix_len = prefix ? strlen(prefix) : 0;
472 iter->base.oid = &iter->oid;
473 iter->flags = flags;
474 iter->refs = refs;
475
476 ret = refs->err;
477 if (ret)
478 goto done;
479
480 ret = reftable_stack_reload(stack);
481 if (ret)
482 goto done;
483
484 merged_table = reftable_stack_merged_table(stack);
485
486 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
487 if (ret)
488 goto done;
489
490 done:
491 iter->err = ret;
492 return iter;
493 }
494
495 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
496 const char *prefix,
497 const char **exclude_patterns,
498 unsigned int flags)
499 {
500 struct reftable_ref_iterator *main_iter, *worktree_iter;
501 struct reftable_ref_store *refs;
502 unsigned int required_flags = REF_STORE_READ;
503
504 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
505 required_flags |= REF_STORE_ODB;
506 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
507
508 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
509
510 /*
511 * The worktree stack is only set when we're in an actual worktree
512 * right now. If we aren't, then we return the common reftable
513 * iterator, only.
514 */
515 if (!refs->worktree_stack)
516 return &main_iter->base;
517
518 /*
519 * Otherwise we merge both the common and the per-worktree refs into a
520 * single iterator.
521 */
522 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
523 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
524 ref_iterator_select, NULL);
525 }
526
527 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
528 const char *refname,
529 struct object_id *oid,
530 struct strbuf *referent,
531 unsigned int *type,
532 int *failure_errno)
533 {
534 struct reftable_ref_store *refs =
535 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
536 struct reftable_stack *stack = stack_for(refs, refname, &refname);
537 int ret;
538
539 if (refs->err < 0)
540 return refs->err;
541
542 ret = reftable_stack_reload(stack);
543 if (ret)
544 return ret;
545
546 ret = read_ref_without_reload(stack, refname, oid, referent, type);
547 if (ret < 0)
548 return ret;
549 if (ret > 0) {
550 *failure_errno = ENOENT;
551 return -1;
552 }
553
554 return 0;
555 }
556
557 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
558 const char *refname,
559 struct strbuf *referent)
560 {
561 struct reftable_ref_store *refs =
562 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
563 struct reftable_stack *stack = stack_for(refs, refname, &refname);
564 struct reftable_ref_record ref = {0};
565 int ret;
566
567 ret = reftable_stack_reload(stack);
568 if (ret)
569 return ret;
570
571 ret = reftable_stack_read_ref(stack, refname, &ref);
572 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
573 strbuf_addstr(referent, ref.value.symref);
574 else
575 ret = -1;
576
577 reftable_ref_record_release(&ref);
578 return ret;
579 }
580
581 /*
582 * Return the refname under which update was originally requested.
583 */
584 static const char *original_update_refname(struct ref_update *update)
585 {
586 while (update->parent_update)
587 update = update->parent_update;
588 return update->refname;
589 }
590
591 struct reftable_transaction_update {
592 struct ref_update *update;
593 struct object_id current_oid;
594 };
595
596 struct write_transaction_table_arg {
597 struct reftable_ref_store *refs;
598 struct reftable_stack *stack;
599 struct reftable_addition *addition;
600 struct reftable_transaction_update *updates;
601 size_t updates_nr;
602 size_t updates_alloc;
603 size_t updates_expected;
604 };
605
606 struct reftable_transaction_data {
607 struct write_transaction_table_arg *args;
608 size_t args_nr, args_alloc;
609 };
610
611 static void free_transaction_data(struct reftable_transaction_data *tx_data)
612 {
613 if (!tx_data)
614 return;
615 for (size_t i = 0; i < tx_data->args_nr; i++) {
616 reftable_addition_destroy(tx_data->args[i].addition);
617 free(tx_data->args[i].updates);
618 }
619 free(tx_data->args);
620 free(tx_data);
621 }
622
623 /*
624 * Prepare transaction update for the given reference update. This will cause
625 * us to lock the corresponding reftable stack for concurrent modification.
626 */
627 static int prepare_transaction_update(struct write_transaction_table_arg **out,
628 struct reftable_ref_store *refs,
629 struct reftable_transaction_data *tx_data,
630 struct ref_update *update,
631 struct strbuf *err)
632 {
633 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
634 struct write_transaction_table_arg *arg = NULL;
635 size_t i;
636 int ret;
637
638 /*
639 * Search for a preexisting stack update. If there is one then we add
640 * the update to it, otherwise we set up a new stack update.
641 */
642 for (i = 0; !arg && i < tx_data->args_nr; i++)
643 if (tx_data->args[i].stack == stack)
644 arg = &tx_data->args[i];
645
646 if (!arg) {
647 struct reftable_addition *addition;
648
649 ret = reftable_stack_reload(stack);
650 if (ret)
651 return ret;
652
653 ret = reftable_stack_new_addition(&addition, stack);
654 if (ret) {
655 if (ret == REFTABLE_LOCK_ERROR)
656 strbuf_addstr(err, "cannot lock references");
657 return ret;
658 }
659
660 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
661 tx_data->args_alloc);
662 arg = &tx_data->args[tx_data->args_nr++];
663 arg->refs = refs;
664 arg->stack = stack;
665 arg->addition = addition;
666 arg->updates = NULL;
667 arg->updates_nr = 0;
668 arg->updates_alloc = 0;
669 arg->updates_expected = 0;
670 }
671
672 arg->updates_expected++;
673
674 if (out)
675 *out = arg;
676
677 return 0;
678 }
679
680 /*
681 * Queue a reference update for the correct stack. We potentially need to
682 * handle multiple stack updates in a single transaction when it spans across
683 * multiple worktrees.
684 */
685 static int queue_transaction_update(struct reftable_ref_store *refs,
686 struct reftable_transaction_data *tx_data,
687 struct ref_update *update,
688 struct object_id *current_oid,
689 struct strbuf *err)
690 {
691 struct write_transaction_table_arg *arg = NULL;
692 int ret;
693
694 if (update->backend_data)
695 BUG("reference update queued more than once");
696
697 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
698 if (ret < 0)
699 return ret;
700
701 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
702 arg->updates_alloc);
703 arg->updates[arg->updates_nr].update = update;
704 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
705 update->backend_data = &arg->updates[arg->updates_nr++];
706
707 return 0;
708 }
709
710 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
711 struct ref_transaction *transaction,
712 struct strbuf *err)
713 {
714 struct reftable_ref_store *refs =
715 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
716 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
717 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
718 struct reftable_transaction_data *tx_data = NULL;
719 struct object_id head_oid;
720 unsigned int head_type = 0;
721 size_t i;
722 int ret;
723
724 ret = refs->err;
725 if (ret < 0)
726 goto done;
727
728 tx_data = xcalloc(1, sizeof(*tx_data));
729
730 /*
731 * Preprocess all updates. First, we check that there are no duplicate
732 * reference updates in this transaction. Second, we lock all stacks
733 * that will be modified during the transaction.
734 */
735 for (i = 0; i < transaction->nr; i++) {
736 ret = prepare_transaction_update(NULL, refs, tx_data,
737 transaction->updates[i], err);
738 if (ret)
739 goto done;
740
741 string_list_append(&affected_refnames,
742 transaction->updates[i]->refname);
743 }
744
745 /*
746 * Now that we have counted updates per stack we can preallocate their
747 * arrays. This avoids having to reallocate many times.
748 */
749 for (i = 0; i < tx_data->args_nr; i++) {
750 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
751 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
752 }
753
754 /*
755 * Fail if a refname appears more than once in the transaction.
756 * This code is taken from the files backend and is a good candidate to
757 * be moved into the generic layer.
758 */
759 string_list_sort(&affected_refnames);
760 if (ref_update_reject_duplicates(&affected_refnames, err)) {
761 ret = TRANSACTION_GENERIC_ERROR;
762 goto done;
763 }
764
765 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
766 &head_referent, &head_type);
767 if (ret < 0)
768 goto done;
769 ret = 0;
770
771 for (i = 0; i < transaction->nr; i++) {
772 struct ref_update *u = transaction->updates[i];
773 struct object_id current_oid = {0};
774 struct reftable_stack *stack;
775 const char *rewritten_ref;
776
777 stack = stack_for(refs, u->refname, &rewritten_ref);
778
779 /* Verify that the new object ID is valid. */
780 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
781 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
782 !(u->flags & REF_LOG_ONLY)) {
783 struct object *o = parse_object(refs->base.repo, &u->new_oid);
784 if (!o) {
785 strbuf_addf(err,
786 _("trying to write ref '%s' with nonexistent object %s"),
787 u->refname, oid_to_hex(&u->new_oid));
788 ret = -1;
789 goto done;
790 }
791
792 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
793 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
794 oid_to_hex(&u->new_oid), u->refname);
795 ret = -1;
796 goto done;
797 }
798 }
799
800 /*
801 * When we update the reference that HEAD points to we enqueue
802 * a second log-only update for HEAD so that its reflog is
803 * updated accordingly.
804 */
805 if (head_type == REF_ISSYMREF &&
806 !(u->flags & REF_LOG_ONLY) &&
807 !(u->flags & REF_UPDATE_VIA_HEAD) &&
808 !strcmp(rewritten_ref, head_referent.buf)) {
809 struct ref_update *new_update;
810
811 /*
812 * First make sure that HEAD is not already in the
813 * transaction. This check is O(lg N) in the transaction
814 * size, but it happens at most once per transaction.
815 */
816 if (string_list_has_string(&affected_refnames, "HEAD")) {
817 /* An entry already existed */
818 strbuf_addf(err,
819 _("multiple updates for 'HEAD' (including one "
820 "via its referent '%s') are not allowed"),
821 u->refname);
822 ret = TRANSACTION_NAME_CONFLICT;
823 goto done;
824 }
825
826 new_update = ref_transaction_add_update(
827 transaction, "HEAD",
828 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
829 &u->new_oid, &u->old_oid, u->msg);
830 string_list_insert(&affected_refnames, new_update->refname);
831 }
832
833 ret = read_ref_without_reload(stack, rewritten_ref,
834 &current_oid, &referent, &u->type);
835 if (ret < 0)
836 goto done;
837 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
838 /*
839 * The reference does not exist, and we either have no
840 * old object ID or expect the reference to not exist.
841 * We can thus skip below safety checks as well as the
842 * symref splitting. But we do want to verify that
843 * there is no conflicting reference here so that we
844 * can output a proper error message instead of failing
845 * at a later point.
846 */
847 ret = refs_verify_refname_available(ref_store, u->refname,
848 &affected_refnames, NULL, err);
849 if (ret < 0)
850 goto done;
851
852 /*
853 * There is no need to write the reference deletion
854 * when the reference in question doesn't exist.
855 */
856 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
857 ret = queue_transaction_update(refs, tx_data, u,
858 &current_oid, err);
859 if (ret)
860 goto done;
861 }
862
863 continue;
864 }
865 if (ret > 0) {
866 /* The reference does not exist, but we expected it to. */
867 strbuf_addf(err, _("cannot lock ref '%s': "
868 "unable to resolve reference '%s'"),
869 original_update_refname(u), u->refname);
870 ret = -1;
871 goto done;
872 }
873
874 if (u->type & REF_ISSYMREF) {
875 /*
876 * The reftable stack is locked at this point already,
877 * so it is safe to call `refs_resolve_ref_unsafe()`
878 * here without causing races.
879 */
880 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
881 &current_oid, NULL);
882
883 if (u->flags & REF_NO_DEREF) {
884 if (u->flags & REF_HAVE_OLD && !resolved) {
885 strbuf_addf(err, _("cannot lock ref '%s': "
886 "error reading reference"), u->refname);
887 ret = -1;
888 goto done;
889 }
890 } else {
891 struct ref_update *new_update;
892 int new_flags;
893
894 new_flags = u->flags;
895 if (!strcmp(rewritten_ref, "HEAD"))
896 new_flags |= REF_UPDATE_VIA_HEAD;
897
898 /*
899 * If we are updating a symref (eg. HEAD), we should also
900 * update the branch that the symref points to.
901 *
902 * This is generic functionality, and would be better
903 * done in refs.c, but the current implementation is
904 * intertwined with the locking in files-backend.c.
905 */
906 new_update = ref_transaction_add_update(
907 transaction, referent.buf, new_flags,
908 &u->new_oid, &u->old_oid, u->msg);
909 new_update->parent_update = u;
910
911 /*
912 * Change the symbolic ref update to log only. Also, it
913 * doesn't need to check its old OID value, as that will be
914 * done when new_update is processed.
915 */
916 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
917 u->flags &= ~REF_HAVE_OLD;
918
919 if (string_list_has_string(&affected_refnames, new_update->refname)) {
920 strbuf_addf(err,
921 _("multiple updates for '%s' (including one "
922 "via symref '%s') are not allowed"),
923 referent.buf, u->refname);
924 ret = TRANSACTION_NAME_CONFLICT;
925 goto done;
926 }
927 string_list_insert(&affected_refnames, new_update->refname);
928 }
929 }
930
931 /*
932 * Verify that the old object matches our expectations. Note
933 * that the error messages here do not make a lot of sense in
934 * the context of the reftable backend as we never lock
935 * individual refs. But the error messages match what the files
936 * backend returns, which keeps our tests happy.
937 */
938 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
939 if (is_null_oid(&u->old_oid))
940 strbuf_addf(err, _("cannot lock ref '%s': "
941 "reference already exists"),
942 original_update_refname(u));
943 else if (is_null_oid(&current_oid))
944 strbuf_addf(err, _("cannot lock ref '%s': "
945 "reference is missing but expected %s"),
946 original_update_refname(u),
947 oid_to_hex(&u->old_oid));
948 else
949 strbuf_addf(err, _("cannot lock ref '%s': "
950 "is at %s but expected %s"),
951 original_update_refname(u),
952 oid_to_hex(&current_oid),
953 oid_to_hex(&u->old_oid));
954 ret = -1;
955 goto done;
956 }
957
958 /*
959 * If all of the following conditions are true:
960 *
961 * - We're not about to write a symref.
962 * - We're not about to write a log-only entry.
963 * - Old and new object ID are the same.
964 *
965 * Then we're essentially doing a no-op update that can be
966 * skipped. This is not only for the sake of efficiency, but
967 * also skips writing unneeded reflog entries.
968 */
969 if ((u->type & REF_ISSYMREF) ||
970 (u->flags & REF_LOG_ONLY) ||
971 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
972 ret = queue_transaction_update(refs, tx_data, u,
973 &current_oid, err);
974 if (ret)
975 goto done;
976 }
977 }
978
979 transaction->backend_data = tx_data;
980 transaction->state = REF_TRANSACTION_PREPARED;
981
982 done:
983 assert(ret != REFTABLE_API_ERROR);
984 if (ret < 0) {
985 free_transaction_data(tx_data);
986 transaction->state = REF_TRANSACTION_CLOSED;
987 if (!err->len)
988 strbuf_addf(err, _("reftable: transaction prepare: %s"),
989 reftable_error_str(ret));
990 }
991 string_list_clear(&affected_refnames, 0);
992 strbuf_release(&referent);
993 strbuf_release(&head_referent);
994
995 return ret;
996 }
997
998 static int reftable_be_transaction_abort(struct ref_store *ref_store,
999 struct ref_transaction *transaction,
1000 struct strbuf *err)
1001 {
1002 struct reftable_transaction_data *tx_data = transaction->backend_data;
1003 free_transaction_data(tx_data);
1004 transaction->state = REF_TRANSACTION_CLOSED;
1005 return 0;
1006 }
1007
1008 static int transaction_update_cmp(const void *a, const void *b)
1009 {
1010 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1011 ((struct reftable_transaction_update *)b)->update->refname);
1012 }
1013
1014 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1015 {
1016 struct write_transaction_table_arg *arg = cb_data;
1017 struct reftable_merged_table *mt =
1018 reftable_stack_merged_table(arg->stack);
1019 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1020 struct reftable_log_record *logs = NULL;
1021 size_t logs_nr = 0, logs_alloc = 0, i;
1022 int ret = 0;
1023
1024 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1025
1026 reftable_writer_set_limits(writer, ts, ts);
1027
1028 for (i = 0; i < arg->updates_nr; i++) {
1029 struct reftable_transaction_update *tx_update = &arg->updates[i];
1030 struct ref_update *u = tx_update->update;
1031
1032 /*
1033 * Write a reflog entry when updating a ref to point to
1034 * something new in any of the following cases:
1035 *
1036 * - The reference is about to be deleted. We always want to
1037 * delete the reflog in that case.
1038 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1039 * the reflog entry.
1040 * - `core.logAllRefUpdates` tells us to create the reflog for
1041 * the given ref.
1042 */
1043 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1044 struct reftable_log_record log = {0};
1045 struct reftable_iterator it = {0};
1046
1047 /*
1048 * When deleting refs we also delete all reflog entries
1049 * with them. While it is not strictly required to
1050 * delete reflogs together with their refs, this
1051 * matches the behaviour of the files backend.
1052 *
1053 * Unfortunately, we have no better way than to delete
1054 * all reflog entries one by one.
1055 */
1056 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1057 while (ret == 0) {
1058 struct reftable_log_record *tombstone;
1059
1060 ret = reftable_iterator_next_log(&it, &log);
1061 if (ret < 0)
1062 break;
1063 if (ret > 0 || strcmp(log.refname, u->refname)) {
1064 ret = 0;
1065 break;
1066 }
1067
1068 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1069 tombstone = &logs[logs_nr++];
1070 tombstone->refname = xstrdup(u->refname);
1071 tombstone->value_type = REFTABLE_LOG_DELETION;
1072 tombstone->update_index = log.update_index;
1073 }
1074
1075 reftable_log_record_release(&log);
1076 reftable_iterator_destroy(&it);
1077
1078 if (ret)
1079 goto done;
1080 } else if (u->flags & REF_HAVE_NEW &&
1081 (u->flags & REF_FORCE_CREATE_REFLOG ||
1082 should_write_log(&arg->refs->base, u->refname))) {
1083 struct reftable_log_record *log;
1084
1085 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1086 log = &logs[logs_nr++];
1087 memset(log, 0, sizeof(*log));
1088
1089 fill_reftable_log_record(log);
1090 log->update_index = ts;
1091 log->refname = xstrdup(u->refname);
1092 memcpy(log->value.update.new_hash, u->new_oid.hash, GIT_MAX_RAWSZ);
1093 memcpy(log->value.update.old_hash, tx_update->current_oid.hash, GIT_MAX_RAWSZ);
1094 log->value.update.message =
1095 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1096 }
1097
1098 if (u->flags & REF_LOG_ONLY)
1099 continue;
1100
1101 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1102 struct reftable_ref_record ref = {
1103 .refname = (char *)u->refname,
1104 .update_index = ts,
1105 .value_type = REFTABLE_REF_DELETION,
1106 };
1107
1108 ret = reftable_writer_add_ref(writer, &ref);
1109 if (ret < 0)
1110 goto done;
1111 } else if (u->flags & REF_HAVE_NEW) {
1112 struct reftable_ref_record ref = {0};
1113 struct object_id peeled;
1114 int peel_error;
1115
1116 ref.refname = (char *)u->refname;
1117 ref.update_index = ts;
1118
1119 peel_error = peel_object(&u->new_oid, &peeled);
1120 if (!peel_error) {
1121 ref.value_type = REFTABLE_REF_VAL2;
1122 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1123 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1124 } else if (!is_null_oid(&u->new_oid)) {
1125 ref.value_type = REFTABLE_REF_VAL1;
1126 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1127 }
1128
1129 ret = reftable_writer_add_ref(writer, &ref);
1130 if (ret < 0)
1131 goto done;
1132 }
1133 }
1134
1135 /*
1136 * Logs are written at the end so that we do not have intermixed ref
1137 * and log blocks.
1138 */
1139 if (logs) {
1140 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1141 if (ret < 0)
1142 goto done;
1143 }
1144
1145 done:
1146 assert(ret != REFTABLE_API_ERROR);
1147 for (i = 0; i < logs_nr; i++)
1148 reftable_log_record_release(&logs[i]);
1149 free(logs);
1150 return ret;
1151 }
1152
1153 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1154 struct ref_transaction *transaction,
1155 struct strbuf *err)
1156 {
1157 struct reftable_transaction_data *tx_data = transaction->backend_data;
1158 int ret = 0;
1159
1160 for (size_t i = 0; i < tx_data->args_nr; i++) {
1161 ret = reftable_addition_add(tx_data->args[i].addition,
1162 write_transaction_table, &tx_data->args[i]);
1163 if (ret < 0)
1164 goto done;
1165
1166 ret = reftable_addition_commit(tx_data->args[i].addition);
1167 if (ret < 0)
1168 goto done;
1169 }
1170
1171 done:
1172 assert(ret != REFTABLE_API_ERROR);
1173 free_transaction_data(tx_data);
1174 transaction->state = REF_TRANSACTION_CLOSED;
1175
1176 if (ret) {
1177 strbuf_addf(err, _("reftable: transaction failure: %s"),
1178 reftable_error_str(ret));
1179 return -1;
1180 }
1181 return ret;
1182 }
1183
1184 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1185 struct ref_transaction *transaction,
1186 struct strbuf *err)
1187 {
1188 return ref_transaction_commit(transaction, err);
1189 }
1190
1191 static int reftable_be_pack_refs(struct ref_store *ref_store,
1192 struct pack_refs_opts *opts)
1193 {
1194 struct reftable_ref_store *refs =
1195 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1196 struct reftable_stack *stack;
1197 int ret;
1198
1199 if (refs->err)
1200 return refs->err;
1201
1202 stack = refs->worktree_stack;
1203 if (!stack)
1204 stack = refs->main_stack;
1205
1206 if (opts->flags & PACK_REFS_AUTO)
1207 ret = reftable_stack_auto_compact(stack);
1208 else
1209 ret = reftable_stack_compact_all(stack, NULL);
1210 if (ret < 0) {
1211 ret = error(_("unable to compact stack: %s"),
1212 reftable_error_str(ret));
1213 goto out;
1214 }
1215
1216 ret = reftable_stack_clean(stack);
1217 if (ret)
1218 goto out;
1219
1220 out:
1221 return ret;
1222 }
1223
1224 struct write_create_symref_arg {
1225 struct reftable_ref_store *refs;
1226 struct reftable_stack *stack;
1227 const char *refname;
1228 const char *target;
1229 const char *logmsg;
1230 };
1231
1232 static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1233 {
1234 struct write_create_symref_arg *create = cb_data;
1235 uint64_t ts = reftable_stack_next_update_index(create->stack);
1236 struct reftable_ref_record ref = {
1237 .refname = (char *)create->refname,
1238 .value_type = REFTABLE_REF_SYMREF,
1239 .value.symref = (char *)create->target,
1240 .update_index = ts,
1241 };
1242 struct reftable_log_record log = {0};
1243 struct object_id new_oid;
1244 struct object_id old_oid;
1245 int ret;
1246
1247 reftable_writer_set_limits(writer, ts, ts);
1248
1249 ret = reftable_writer_add_ref(writer, &ref);
1250 if (ret)
1251 return ret;
1252
1253 /*
1254 * Note that it is important to try and resolve the reference before we
1255 * write the log entry. This is because `should_write_log()` will munge
1256 * `core.logAllRefUpdates`, which is undesirable when we create a new
1257 * repository because it would be written into the config. As HEAD will
1258 * not resolve for new repositories this ordering will ensure that this
1259 * never happens.
1260 */
1261 if (!create->logmsg ||
1262 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1263 RESOLVE_REF_READING, &new_oid, NULL) ||
1264 !should_write_log(&create->refs->base, create->refname))
1265 return 0;
1266
1267 fill_reftable_log_record(&log);
1268 log.refname = xstrdup(create->refname);
1269 log.update_index = ts;
1270 log.value.update.message = xstrndup(create->logmsg,
1271 create->refs->write_options.block_size / 2);
1272 memcpy(log.value.update.new_hash, new_oid.hash, GIT_MAX_RAWSZ);
1273 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1274 RESOLVE_REF_READING, &old_oid, NULL))
1275 memcpy(log.value.update.old_hash, old_oid.hash, GIT_MAX_RAWSZ);
1276
1277 ret = reftable_writer_add_log(writer, &log);
1278 reftable_log_record_release(&log);
1279 return ret;
1280 }
1281
1282 static int reftable_be_create_symref(struct ref_store *ref_store,
1283 const char *refname,
1284 const char *target,
1285 const char *logmsg)
1286 {
1287 struct reftable_ref_store *refs =
1288 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1289 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1290 struct write_create_symref_arg arg = {
1291 .refs = refs,
1292 .stack = stack,
1293 .refname = refname,
1294 .target = target,
1295 .logmsg = logmsg,
1296 };
1297 int ret;
1298
1299 ret = refs->err;
1300 if (ret < 0)
1301 goto done;
1302
1303 ret = reftable_stack_reload(stack);
1304 if (ret)
1305 goto done;
1306
1307 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1308
1309 done:
1310 assert(ret != REFTABLE_API_ERROR);
1311 if (ret)
1312 error("unable to write symref for %s: %s", refname,
1313 reftable_error_str(ret));
1314 return ret;
1315 }
1316
1317 struct write_copy_arg {
1318 struct reftable_ref_store *refs;
1319 struct reftable_stack *stack;
1320 const char *oldname;
1321 const char *newname;
1322 const char *logmsg;
1323 int delete_old;
1324 };
1325
1326 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1327 {
1328 struct write_copy_arg *arg = cb_data;
1329 uint64_t deletion_ts, creation_ts;
1330 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1331 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1332 struct reftable_log_record old_log = {0}, *logs = NULL;
1333 struct reftable_iterator it = {0};
1334 struct string_list skip = STRING_LIST_INIT_NODUP;
1335 struct strbuf errbuf = STRBUF_INIT;
1336 size_t logs_nr = 0, logs_alloc = 0, i;
1337 int ret;
1338
1339 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1340 ret = error(_("refname %s not found"), arg->oldname);
1341 goto done;
1342 }
1343 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1344 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1345 arg->oldname);
1346 goto done;
1347 }
1348
1349 /*
1350 * There's nothing to do in case the old and new name are the same, so
1351 * we exit early in that case.
1352 */
1353 if (!strcmp(arg->oldname, arg->newname)) {
1354 ret = 0;
1355 goto done;
1356 }
1357
1358 /*
1359 * Verify that the new refname is available.
1360 */
1361 string_list_insert(&skip, arg->oldname);
1362 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1363 NULL, &skip, &errbuf);
1364 if (ret < 0) {
1365 error("%s", errbuf.buf);
1366 goto done;
1367 }
1368
1369 /*
1370 * When deleting the old reference we have to use two update indices:
1371 * one to delete the old ref and its reflog, and one to create the
1372 * new ref and its reflog. They need to be staged with two separate
1373 * indices because the new reflog needs to encode both the deletion of
1374 * the old branch and the creation of the new branch, and we cannot do
1375 * two changes to a reflog in a single update.
1376 */
1377 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1378 if (arg->delete_old)
1379 creation_ts++;
1380 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1381
1382 /*
1383 * Add the new reference. If this is a rename then we also delete the
1384 * old reference.
1385 */
1386 refs[0] = old_ref;
1387 refs[0].refname = (char *)arg->newname;
1388 refs[0].update_index = creation_ts;
1389 if (arg->delete_old) {
1390 refs[1].refname = (char *)arg->oldname;
1391 refs[1].value_type = REFTABLE_REF_DELETION;
1392 refs[1].update_index = deletion_ts;
1393 }
1394 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1395 if (ret < 0)
1396 goto done;
1397
1398 /*
1399 * When deleting the old branch we need to create a reflog entry on the
1400 * new branch name that indicates that the old branch has been deleted
1401 * and then recreated. This is a tad weird, but matches what the files
1402 * backend does.
1403 */
1404 if (arg->delete_old) {
1405 struct strbuf head_referent = STRBUF_INIT;
1406 struct object_id head_oid;
1407 int append_head_reflog;
1408 unsigned head_type = 0;
1409
1410 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1411 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1412 fill_reftable_log_record(&logs[logs_nr]);
1413 logs[logs_nr].refname = (char *)arg->newname;
1414 logs[logs_nr].update_index = deletion_ts;
1415 logs[logs_nr].value.update.message =
1416 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1417 memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1418 logs_nr++;
1419
1420 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1421 if (ret < 0)
1422 goto done;
1423 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1424 strbuf_release(&head_referent);
1425
1426 /*
1427 * The files backend uses `refs_delete_ref()` to delete the old
1428 * branch name, which will append a reflog entry for HEAD in
1429 * case it points to the old branch.
1430 */
1431 if (append_head_reflog) {
1432 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1433 logs[logs_nr] = logs[logs_nr - 1];
1434 logs[logs_nr].refname = "HEAD";
1435 logs_nr++;
1436 }
1437 }
1438
1439 /*
1440 * Create the reflog entry for the newly created branch.
1441 */
1442 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1443 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1444 fill_reftable_log_record(&logs[logs_nr]);
1445 logs[logs_nr].refname = (char *)arg->newname;
1446 logs[logs_nr].update_index = creation_ts;
1447 logs[logs_nr].value.update.message =
1448 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1449 memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1450 logs_nr++;
1451
1452 /*
1453 * In addition to writing the reflog entry for the new branch, we also
1454 * copy over all log entries from the old reflog. Last but not least,
1455 * when renaming we also have to delete all the old reflog entries.
1456 */
1457 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1458 if (ret < 0)
1459 goto done;
1460
1461 while (1) {
1462 ret = reftable_iterator_next_log(&it, &old_log);
1463 if (ret < 0)
1464 goto done;
1465 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1466 ret = 0;
1467 break;
1468 }
1469
1470 free(old_log.refname);
1471
1472 /*
1473 * Copy over the old reflog entry with the new refname.
1474 */
1475 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1476 logs[logs_nr] = old_log;
1477 logs[logs_nr].refname = (char *)arg->newname;
1478 logs_nr++;
1479
1480 /*
1481 * Delete the old reflog entry in case we are renaming.
1482 */
1483 if (arg->delete_old) {
1484 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1485 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1486 logs[logs_nr].refname = (char *)arg->oldname;
1487 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1488 logs[logs_nr].update_index = old_log.update_index;
1489 logs_nr++;
1490 }
1491
1492 /*
1493 * Transfer ownership of the log record we're iterating over to
1494 * the array of log records. Otherwise, the pointers would get
1495 * free'd or reallocated by the iterator.
1496 */
1497 memset(&old_log, 0, sizeof(old_log));
1498 }
1499
1500 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1501 if (ret < 0)
1502 goto done;
1503
1504 done:
1505 assert(ret != REFTABLE_API_ERROR);
1506 reftable_iterator_destroy(&it);
1507 string_list_clear(&skip, 0);
1508 strbuf_release(&errbuf);
1509 for (i = 0; i < logs_nr; i++) {
1510 if (!strcmp(logs[i].refname, "HEAD"))
1511 continue;
1512 logs[i].refname = NULL;
1513 reftable_log_record_release(&logs[i]);
1514 }
1515 free(logs);
1516 reftable_ref_record_release(&old_ref);
1517 reftable_log_record_release(&old_log);
1518 return ret;
1519 }
1520
1521 static int reftable_be_rename_ref(struct ref_store *ref_store,
1522 const char *oldrefname,
1523 const char *newrefname,
1524 const char *logmsg)
1525 {
1526 struct reftable_ref_store *refs =
1527 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1528 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1529 struct write_copy_arg arg = {
1530 .refs = refs,
1531 .stack = stack,
1532 .oldname = oldrefname,
1533 .newname = newrefname,
1534 .logmsg = logmsg,
1535 .delete_old = 1,
1536 };
1537 int ret;
1538
1539 ret = refs->err;
1540 if (ret < 0)
1541 goto done;
1542
1543 ret = reftable_stack_reload(stack);
1544 if (ret)
1545 goto done;
1546 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1547
1548 done:
1549 assert(ret != REFTABLE_API_ERROR);
1550 return ret;
1551 }
1552
1553 static int reftable_be_copy_ref(struct ref_store *ref_store,
1554 const char *oldrefname,
1555 const char *newrefname,
1556 const char *logmsg)
1557 {
1558 struct reftable_ref_store *refs =
1559 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1560 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1561 struct write_copy_arg arg = {
1562 .refs = refs,
1563 .stack = stack,
1564 .oldname = oldrefname,
1565 .newname = newrefname,
1566 .logmsg = logmsg,
1567 };
1568 int ret;
1569
1570 ret = refs->err;
1571 if (ret < 0)
1572 goto done;
1573
1574 ret = reftable_stack_reload(stack);
1575 if (ret)
1576 goto done;
1577 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1578
1579 done:
1580 assert(ret != REFTABLE_API_ERROR);
1581 return ret;
1582 }
1583
1584 struct reftable_reflog_iterator {
1585 struct ref_iterator base;
1586 struct reftable_ref_store *refs;
1587 struct reftable_iterator iter;
1588 struct reftable_log_record log;
1589 struct strbuf last_name;
1590 int err;
1591 };
1592
1593 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1594 {
1595 struct reftable_reflog_iterator *iter =
1596 (struct reftable_reflog_iterator *)ref_iterator;
1597
1598 while (!iter->err) {
1599 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1600 if (iter->err)
1601 break;
1602
1603 /*
1604 * We want the refnames that we have reflogs for, so we skip if
1605 * we've already produced this name. This could be faster by
1606 * seeking directly to reflog@update_index==0.
1607 */
1608 if (!strcmp(iter->log.refname, iter->last_name.buf))
1609 continue;
1610
1611 if (check_refname_format(iter->log.refname,
1612 REFNAME_ALLOW_ONELEVEL))
1613 continue;
1614
1615 strbuf_reset(&iter->last_name);
1616 strbuf_addstr(&iter->last_name, iter->log.refname);
1617 iter->base.refname = iter->log.refname;
1618
1619 break;
1620 }
1621
1622 if (iter->err > 0) {
1623 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1624 return ITER_ERROR;
1625 return ITER_DONE;
1626 }
1627
1628 if (iter->err < 0) {
1629 ref_iterator_abort(ref_iterator);
1630 return ITER_ERROR;
1631 }
1632
1633 return ITER_OK;
1634 }
1635
1636 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1637 struct object_id *peeled)
1638 {
1639 BUG("reftable reflog iterator cannot be peeled");
1640 return -1;
1641 }
1642
1643 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1644 {
1645 struct reftable_reflog_iterator *iter =
1646 (struct reftable_reflog_iterator *)ref_iterator;
1647 reftable_log_record_release(&iter->log);
1648 reftable_iterator_destroy(&iter->iter);
1649 strbuf_release(&iter->last_name);
1650 free(iter);
1651 return ITER_DONE;
1652 }
1653
1654 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1655 .advance = reftable_reflog_iterator_advance,
1656 .peel = reftable_reflog_iterator_peel,
1657 .abort = reftable_reflog_iterator_abort
1658 };
1659
1660 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1661 struct reftable_stack *stack)
1662 {
1663 struct reftable_merged_table *merged_table;
1664 struct reftable_reflog_iterator *iter;
1665 int ret;
1666
1667 iter = xcalloc(1, sizeof(*iter));
1668 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
1669 strbuf_init(&iter->last_name, 0);
1670 iter->refs = refs;
1671
1672 ret = refs->err;
1673 if (ret)
1674 goto done;
1675
1676 ret = reftable_stack_reload(stack);
1677 if (ret < 0)
1678 goto done;
1679
1680 merged_table = reftable_stack_merged_table(stack);
1681
1682 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1683 if (ret < 0)
1684 goto done;
1685
1686 done:
1687 iter->err = ret;
1688 return iter;
1689 }
1690
1691 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1692 {
1693 struct reftable_ref_store *refs =
1694 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1695 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1696
1697 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1698 if (!refs->worktree_stack)
1699 return &main_iter->base;
1700
1701 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1702
1703 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
1704 ref_iterator_select, NULL);
1705 }
1706
1707 static int yield_log_record(struct reftable_log_record *log,
1708 each_reflog_ent_fn fn,
1709 void *cb_data)
1710 {
1711 struct object_id old_oid, new_oid;
1712 const char *full_committer;
1713
1714 oidread(&old_oid, log->value.update.old_hash);
1715 oidread(&new_oid, log->value.update.new_hash);
1716
1717 /*
1718 * When both the old object ID and the new object ID are null
1719 * then this is the reflog existence marker. The caller must
1720 * not be aware of it.
1721 */
1722 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1723 return 0;
1724
1725 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1726 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1727 return fn(&old_oid, &new_oid, full_committer,
1728 log->value.update.time, log->value.update.tz_offset,
1729 log->value.update.message, cb_data);
1730 }
1731
1732 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1733 const char *refname,
1734 each_reflog_ent_fn fn,
1735 void *cb_data)
1736 {
1737 struct reftable_ref_store *refs =
1738 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1739 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1740 struct reftable_merged_table *mt = NULL;
1741 struct reftable_log_record log = {0};
1742 struct reftable_iterator it = {0};
1743 int ret;
1744
1745 if (refs->err < 0)
1746 return refs->err;
1747
1748 mt = reftable_stack_merged_table(stack);
1749 ret = reftable_merged_table_seek_log(mt, &it, refname);
1750 while (!ret) {
1751 ret = reftable_iterator_next_log(&it, &log);
1752 if (ret < 0)
1753 break;
1754 if (ret > 0 || strcmp(log.refname, refname)) {
1755 ret = 0;
1756 break;
1757 }
1758
1759 ret = yield_log_record(&log, fn, cb_data);
1760 if (ret)
1761 break;
1762 }
1763
1764 reftable_log_record_release(&log);
1765 reftable_iterator_destroy(&it);
1766 return ret;
1767 }
1768
1769 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1770 const char *refname,
1771 each_reflog_ent_fn fn,
1772 void *cb_data)
1773 {
1774 struct reftable_ref_store *refs =
1775 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1776 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1777 struct reftable_merged_table *mt = NULL;
1778 struct reftable_log_record *logs = NULL;
1779 struct reftable_iterator it = {0};
1780 size_t logs_alloc = 0, logs_nr = 0, i;
1781 int ret;
1782
1783 if (refs->err < 0)
1784 return refs->err;
1785
1786 mt = reftable_stack_merged_table(stack);
1787 ret = reftable_merged_table_seek_log(mt, &it, refname);
1788 while (!ret) {
1789 struct reftable_log_record log = {0};
1790
1791 ret = reftable_iterator_next_log(&it, &log);
1792 if (ret < 0)
1793 goto done;
1794 if (ret > 0 || strcmp(log.refname, refname)) {
1795 reftable_log_record_release(&log);
1796 ret = 0;
1797 break;
1798 }
1799
1800 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1801 logs[logs_nr++] = log;
1802 }
1803
1804 for (i = logs_nr; i--;) {
1805 ret = yield_log_record(&logs[i], fn, cb_data);
1806 if (ret)
1807 goto done;
1808 }
1809
1810 done:
1811 reftable_iterator_destroy(&it);
1812 for (i = 0; i < logs_nr; i++)
1813 reftable_log_record_release(&logs[i]);
1814 free(logs);
1815 return ret;
1816 }
1817
1818 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1819 const char *refname)
1820 {
1821 struct reftable_ref_store *refs =
1822 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1823 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1824 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
1825 struct reftable_log_record log = {0};
1826 struct reftable_iterator it = {0};
1827 int ret;
1828
1829 ret = refs->err;
1830 if (ret < 0)
1831 goto done;
1832
1833 ret = reftable_stack_reload(stack);
1834 if (ret < 0)
1835 goto done;
1836
1837 ret = reftable_merged_table_seek_log(mt, &it, refname);
1838 if (ret < 0)
1839 goto done;
1840
1841 /*
1842 * Check whether we get at least one log record for the given ref name.
1843 * If so, the reflog exists, otherwise it doesn't.
1844 */
1845 ret = reftable_iterator_next_log(&it, &log);
1846 if (ret < 0)
1847 goto done;
1848 if (ret > 0) {
1849 ret = 0;
1850 goto done;
1851 }
1852
1853 ret = strcmp(log.refname, refname) == 0;
1854
1855 done:
1856 reftable_iterator_destroy(&it);
1857 reftable_log_record_release(&log);
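/* Any error while reading the log is reported as "no reflog". */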
1858 if (ret < 0)
1859 ret = 0;
1860 return ret;
1861 }
1862
1863 struct write_reflog_existence_arg {
1864 struct reftable_ref_store *refs;
1865 const char *refname;
1866 struct reftable_stack *stack;
1867 };
1868
1869 static int write_reflog_existence_table(struct reftable_writer *writer,
1870 void *cb_data)
1871 {
1872 struct write_reflog_existence_arg *arg = cb_data;
1873 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1874 struct reftable_log_record log = {0};
1875 int ret;
1876
1877 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1878 if (ret <= 0)
1879 goto done;
1880
1881 reftable_writer_set_limits(writer, ts, ts);
1882
1883 /*
1884 * The existence entry has both old and new object ID set to the
1885 * null object ID. Our iterators are aware of this and will not present
1886 * them to their callers.
1887 */
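/*
 * Concretely: only the refname, update index and value type are set below;
 * value.update stays zero-initialized, so the old and new hashes read back
 * as the null object ID.
 */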
1888 log.refname = xstrdup(arg->refname);
1889 log.update_index = ts;
1890 log.value_type = REFTABLE_LOG_UPDATE;
1891 ret = reftable_writer_add_log(writer, &log);
1892
1893 done:
1894 assert(ret != REFTABLE_API_ERROR);
1895 reftable_log_record_release(&log);
1896 return ret;
1897 }
1898
1899 static int reftable_be_create_reflog(struct ref_store *ref_store,
1900 const char *refname,
1901 struct strbuf *errmsg)
1902 {
1903 struct reftable_ref_store *refs =
1904 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1905 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1906 struct write_reflog_existence_arg arg = {
1907 .refs = refs,
1908 .stack = stack,
1909 .refname = refname,
1910 };
1911 int ret;
1912
1913 ret = refs->err;
1914 if (ret < 0)
1915 goto done;
1916
1917 ret = reftable_stack_reload(stack);
1918 if (ret)
1919 goto done;
1920
1921 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1922
1923 done:
1924 return ret;
1925 }
1926
1927 struct write_reflog_delete_arg {
1928 struct reftable_stack *stack;
1929 const char *refname;
1930 };
1931
1932 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1933 {
1934 struct write_reflog_delete_arg *arg = cb_data;
1935 struct reftable_merged_table *mt =
1936 reftable_stack_merged_table(arg->stack);
1937 struct reftable_log_record log = {0}, tombstone = {0};
1938 struct reftable_iterator it = {0};
1939 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1940 int ret;
1941
1942 reftable_writer_set_limits(writer, ts, ts);
1943
1944 /*
1945 * In order to delete a reflog we need to delete all of its entries one
1946 * by one. This is inefficient, but the reftable format currently has no
1947 * cheaper way to express the deletion of an entire reflog.
1948 */
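/*
 * Each tombstone reuses the update index of the entry it deletes, so it
 * shadows exactly that entry once the new table is layered on top of the
 * stack.
 */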
1949 ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
1950 while (ret == 0) {
1951 ret = reftable_iterator_next_log(&it, &log);
1952 if (ret < 0)
1953 break;
1954 if (ret > 0 || strcmp(log.refname, arg->refname)) {
1955 ret = 0;
1956 break;
1957 }
1958
1959 tombstone.refname = (char *)arg->refname;
1960 tombstone.value_type = REFTABLE_LOG_DELETION;
1961 tombstone.update_index = log.update_index;
1962
1963 ret = reftable_writer_add_log(writer, &tombstone);
1964 }
1965
1966 reftable_log_record_release(&log);
1967 reftable_iterator_destroy(&it);
1968 return ret;
1969 }
1970
1971 static int reftable_be_delete_reflog(struct ref_store *ref_store,
1972 const char *refname)
1973 {
1974 struct reftable_ref_store *refs =
1975 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1976 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1977 struct write_reflog_delete_arg arg = {
1978 .stack = stack,
1979 .refname = refname,
1980 };
1981 int ret;
1982
1983 ret = reftable_stack_reload(stack);
1984 if (ret)
1985 return ret;
1986 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
1987
1988 assert(ret != REFTABLE_API_ERROR);
1989 return ret;
1990 }
1991
1992 struct reflog_expiry_arg {
1993 struct reftable_stack *stack;
1994 struct reftable_log_record *records;
1995 struct object_id update_oid;
1996 const char *refname;
1997 size_t len;
1998 };
1999
2000 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2001 {
2002 struct reflog_expiry_arg *arg = cb_data;
2003 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2004 uint64_t live_records = 0;
2005 size_t i;
2006 int ret;
2007
2008 for (i = 0; i < arg->len; i++)
2009 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2010 live_records++;
2011
2012 reftable_writer_set_limits(writer, ts, ts);
2013
2014 if (!is_null_oid(&arg->update_oid)) {
2015 struct reftable_ref_record ref = {0};
2016 struct object_id peeled;
2017
2018 ref.refname = (char *)arg->refname;
2019 ref.update_index = ts;
2020
2021 if (!peel_object(&arg->update_oid, &peeled)) {
2022 ref.value_type = REFTABLE_REF_VAL2;
2023 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2024 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2025 } else {
2026 ref.value_type = REFTABLE_REF_VAL1;
2027 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2028 }
2029
2030 ret = reftable_writer_add_ref(writer, &ref);
2031 if (ret < 0)
2032 return ret;
2033 }
2034
2035 /*
2036 * When there are no more entries left in the reflog we empty it
2037 * completely, but write a placeholder reflog entry that indicates that
2038 * the reflog still exists.
2039 */
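/*
 * This is the same kind of existence marker that
 * write_reflog_existence_table() writes: value.update is left zeroed, so
 * both object IDs read back as the null object ID.
 */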
2040 if (!live_records) {
2041 struct reftable_log_record log = {
2042 .refname = (char *)arg->refname,
2043 .value_type = REFTABLE_LOG_UPDATE,
2044 .update_index = ts,
2045 };
2046
2047 ret = reftable_writer_add_log(writer, &log);
2048 if (ret)
2049 return ret;
2050 }
2051
2052 for (i = 0; i < arg->len; i++) {
2053 ret = reftable_writer_add_log(writer, &arg->records[i]);
2054 if (ret)
2055 return ret;
2056 }
2057
2058 return 0;
2059 }
2060
2061 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2062 const char *refname,
2063 unsigned int flags,
2064 reflog_expiry_prepare_fn prepare_fn,
2065 reflog_expiry_should_prune_fn should_prune_fn,
2066 reflog_expiry_cleanup_fn cleanup_fn,
2067 void *policy_cb_data)
2068 {
2069 /*
2070 * For log expiry, we write tombstones for every single reflog entry
2071 * that is to be expired. This means that the entries are still
2072 * retrievable by delving into the stack, and expiring entries
2073 * paradoxically takes extra memory. This memory is only reclaimed when
2074 * compacting the reftable stack.
2075 *
2076 * It would be better if the refs backend supported an API that sets a
2077 * criterion for all refs, passing the criterion to pack_refs().
2078 *
2079 * On the plus side, because we do the expiration per ref, we can easily
2080 * insert the reflog existence dummies.
2081 */
2082 struct reftable_ref_store *refs =
2083 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2084 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2085 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
2086 struct reftable_log_record *logs = NULL;
2087 struct reftable_log_record *rewritten = NULL;
2088 struct reftable_ref_record ref_record = {0};
2089 struct reftable_iterator it = {0};
2090 struct reftable_addition *add = NULL;
2091 struct reflog_expiry_arg arg = {0};
2092 struct object_id oid = {0};
2093 uint8_t *last_hash = NULL;
2094 size_t logs_nr = 0, logs_alloc = 0, i;
2095 int ret;
2096
2097 if (refs->err < 0)
2098 return refs->err;
2099
2100 ret = reftable_stack_reload(stack);
2101 if (ret < 0)
2102 goto done;
2103
2104 ret = reftable_merged_table_seek_log(mt, &it, refname);
2105 if (ret < 0)
2106 goto done;
2107
2108 ret = reftable_stack_new_addition(&add, stack);
2109 if (ret < 0)
2110 goto done;
2111
2112 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2113 if (ret < 0)
2114 goto done;
2115 if (reftable_ref_record_val1(&ref_record))
2116 oidread(&oid, reftable_ref_record_val1(&ref_record));
2117 prepare_fn(refname, &oid, policy_cb_data);
2118
2119 while (1) {
2120 struct reftable_log_record log = {0};
2121 struct object_id old_oid, new_oid;
2122
2123 ret = reftable_iterator_next_log(&it, &log);
2124 if (ret < 0)
2125 goto done;
2126 if (ret > 0 || strcmp(log.refname, refname)) {
2127 reftable_log_record_release(&log);
2128 break;
2129 }
2130
2131 oidread(&old_oid, log.value.update.old_hash);
2132 oidread(&new_oid, log.value.update.new_hash);
2133
2134 /*
2135 * Skip over the reflog existence marker. We will add it back
2136 * in when there are no live reflog records.
2137 */
2138 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2139 reftable_log_record_release(&log);
2140 continue;
2141 }
2142
2143 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2144 logs[logs_nr++] = log;
2145 }
2146
2147 /*
2148 * We need to rewrite all reflog entries according to the pruning
2149 * callback function:
2150 *
2151 * - If a reflog entry shall be pruned we mark the record for
2152 * deletion.
2153 *
2154 * - Otherwise we may have to rewrite the chain of reflog entries so
2155 * that gaps created by just-deleted records get backfilled.
2156 */
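/*
 * For example, with entries (oldest to newest) A->B, B->C and C->D, pruning
 * B->C under EXPIRE_REFLOGS_REWRITE leaves A->B and a rewritten B->D: the
 * loop below walks from the oldest entry, remembers the new object ID of
 * the last surviving record in `last_hash`, and patches it into the old
 * object ID of the next surviving record.
 */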
2157 CALLOC_ARRAY(rewritten, logs_nr);
2158 for (i = logs_nr; i--;) {
2159 struct reftable_log_record *dest = &rewritten[i];
2160 struct object_id old_oid, new_oid;
2161
2162 *dest = logs[i];
2163 oidread(&old_oid, logs[i].value.update.old_hash);
2164 oidread(&new_oid, logs[i].value.update.new_hash);
2165
2166 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2167 (timestamp_t)logs[i].value.update.time,
2168 logs[i].value.update.tz_offset,
2169 logs[i].value.update.message,
2170 policy_cb_data)) {
2171 dest->value_type = REFTABLE_LOG_DELETION;
2172 } else {
2173 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2174 memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
2175 last_hash = logs[i].value.update.new_hash;
2176 }
2177 }
2178
2179 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2180 reftable_ref_record_val1(&ref_record))
2181 oidread(&arg.update_oid, last_hash);
2182
2183 arg.records = rewritten;
2184 arg.len = logs_nr;
2185 arg.stack = stack;
2186 arg.refname = refname;
2187
2188 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2189 if (ret < 0)
2190 goto done;
2191
2192 /*
2193 * Future improvement: we could skip writing records that were
2194 * not changed.
2195 */
2196 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2197 ret = reftable_addition_commit(add);
2198
2199 done:
2200 if (add)
2201 cleanup_fn(policy_cb_data);
2202 assert(ret != REFTABLE_API_ERROR);
2203
2204 reftable_ref_record_release(&ref_record);
2205 reftable_iterator_destroy(&it);
2206 reftable_addition_destroy(add);
2207 for (i = 0; i < logs_nr; i++)
2208 reftable_log_record_release(&logs[i]);
2209 free(logs);
2210 free(rewritten);
2211 return ret;
2212 }
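
/*
 * Illustrative sketch (not part of the backend): a trivial expiry policy
 * driven through the generic API. This assumes the refs_reflog_expire()
 * wrapper from refs.h and the reflog_expiry_*_fn callback typedefs; the
 * exact helper names may differ between versions, and `some_cutoff` stands
 * in for whatever cutoff the caller computes.
 *
 *	struct policy { timestamp_t cutoff; };
 *
 *	static void prep(const char *refname, const struct object_id *oid,
 *			 void *cb_data) { }
 *
 *	static int prune_old(struct object_id *old_oid, struct object_id *new_oid,
 *			     const char *email, timestamp_t timestamp, int tz,
 *			     const char *message, void *cb_data)
 *	{
 *		struct policy *p = cb_data;
 *		return timestamp < p->cutoff;
 *	}
 *
 *	static void cleanup(void *cb_data) { }
 *
 *	struct policy p = { .cutoff = some_cutoff };
 *	refs_reflog_expire(ref_store, "refs/heads/main", EXPIRE_REFLOGS_REWRITE,
 *			   prep, prune_old, cleanup, &p);
 */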
2213
2214 struct ref_storage_be refs_be_reftable = {
2215 .name = "reftable",
2216 .init = reftable_be_init,
2217 .init_db = reftable_be_init_db,
2218 .transaction_prepare = reftable_be_transaction_prepare,
2219 .transaction_finish = reftable_be_transaction_finish,
2220 .transaction_abort = reftable_be_transaction_abort,
2221 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2222
2223 .pack_refs = reftable_be_pack_refs,
2224 .create_symref = reftable_be_create_symref,
2225 .rename_ref = reftable_be_rename_ref,
2226 .copy_ref = reftable_be_copy_ref,
2227
2228 .iterator_begin = reftable_be_iterator_begin,
2229 .read_raw_ref = reftable_be_read_raw_ref,
2230 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2231
2232 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2233 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2234 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2235 .reflog_exists = reftable_be_reflog_exists,
2236 .create_reflog = reftable_be_create_reflog,
2237 .delete_reflog = reftable_be_delete_reflog,
2238 .reflog_expire = reftable_be_reflog_expire,
2239 };
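
/*
 * Note: this backend is selected by the "reftable" name above. In Git
 * versions that ship reftable support it is typically enabled per repository
 * via `git init --ref-format=reftable` (or the corresponding clone option),
 * which records the choice in the `extensions.refStorage` configuration key;
 * the exact knobs may vary between releases.
 */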