refs/reftable-backend.c
1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../iterator.h"
9 #include "../ident.h"
10 #include "../lockfile.h"
11 #include "../object.h"
12 #include "../path.h"
13 #include "../refs.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
22
23 /*
 24  * Used as a flag in ref_update::flags when the ref_update was created
 25  * via an update to HEAD.
26 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
28
29 struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51 };
52
53 /*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62 {
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76 }
77
78 /*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
90 */
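/*
 * For illustration (hypothetical ref names): "refs/heads/main" maps to the
 * main stack, "HEAD" and "refs/bisect/bad" map to the worktree stack when
 * one exists, and "worktrees/wt/refs/bisect/bad" maps to the stack of
 * worktree "wt" with the rewritten name "refs/bisect/bad".
 */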
91 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94 {
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when the worktree references the
118 * current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153 }
154
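/*
 * Decide whether a reflog entry should be written for the given refname,
 * honouring `core.logAllRefUpdates` and whether the ref already has a
 * reflog.
 */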
155 static int should_write_log(struct ref_store *refs, const char *refname)
156 {
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172 }
173
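/*
 * Fill in the committer identity (name, email, timestamp and timezone
 * offset) of a log record from `git_committer_info()`. The caller remains
 * responsible for setting the refname, update index and message.
 */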
174 static void fill_reftable_log_record(struct reftable_log_record *log)
175 {
176 const char *info = git_committer_info(0);
177 struct ident_split split = {0};
178 int sign = 1;
179
180 if (split_ident_line(&split, info, strlen(info)))
181 BUG("failed splitting committer info");
182
183 reftable_log_record_release(log);
184 log->value_type = REFTABLE_LOG_UPDATE;
185 log->value.update.name =
186 xstrndup(split.name_begin, split.name_end - split.name_begin);
187 log->value.update.email =
188 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
189 log->value.update.time = atol(split.date_begin);
190 if (*split.tz_begin == '-') {
191 sign = -1;
192 split.tz_begin++;
193 }
194 if (*split.tz_begin == '+') {
195 sign = 1;
196 split.tz_begin++;
197 }
198
199 log->value.update.tz_offset = sign * atoi(split.tz_begin);
200 }
201
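/*
 * Read a single reference from the given stack without reloading it first.
 * Returns 0 on success, a positive value when the reference does not exist,
 * and a negative error code otherwise. Symrefs are returned via `referent`
 * with REF_ISSYMREF set in `type`, direct references via `oid`.
 */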
202 static int read_ref_without_reload(struct reftable_stack *stack,
203 const char *refname,
204 struct object_id *oid,
205 struct strbuf *referent,
206 unsigned int *type)
207 {
208 struct reftable_ref_record ref = {0};
209 int ret;
210
211 ret = reftable_stack_read_ref(stack, refname, &ref);
212 if (ret)
213 goto done;
214
215 if (ref.value_type == REFTABLE_REF_SYMREF) {
216 strbuf_reset(referent);
217 strbuf_addstr(referent, ref.value.symref);
218 *type |= REF_ISSYMREF;
219 } else if (reftable_ref_record_val1(&ref)) {
220 oidread(oid, reftable_ref_record_val1(&ref));
221 } else {
222 /* We got a tombstone, which should not happen. */
223 BUG("unhandled reference value type %d", ref.value_type);
224 }
225
226 done:
227 assert(ret != REFTABLE_API_ERROR);
228 reftable_ref_record_release(&ref);
229 return ret;
230 }
231
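/*
 * Set up the reftable ref store for the given repository: the main stack is
 * created in the common git dir and, when the refdb is opened via a
 * worktree, an additional per-worktree stack is created in that worktree's
 * gitdir.
 */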
232 static struct ref_store *reftable_be_init(struct repository *repo,
233 const char *gitdir,
234 unsigned int store_flags)
235 {
236 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
237 struct strbuf path = STRBUF_INIT;
238 int is_worktree;
239 mode_t mask;
240
241 mask = umask(0);
242 umask(mask);
243
244 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
245 strmap_init(&refs->worktree_stacks);
246 refs->store_flags = store_flags;
247 refs->write_options.block_size = 4096;
248 refs->write_options.hash_id = repo->hash_algo->format_id;
249 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
250
251 /*
252 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
253 * This stack contains both the shared and the main worktree refs.
254 *
255 * Note that we don't try to resolve the path in case we have a
256 * worktree because `get_common_dir_noenv()` already does it for us.
257 */
258 is_worktree = get_common_dir_noenv(&path, gitdir);
259 if (!is_worktree) {
260 strbuf_reset(&path);
261 strbuf_realpath(&path, gitdir, 0);
262 }
263 strbuf_addstr(&path, "/reftable");
264 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
265 refs->write_options);
266 if (refs->err)
267 goto done;
268
269 /*
270 * If we're in a worktree we also need to set up the worktree reftable
271 * stack that is contained in the per-worktree GIT_DIR.
272 *
273 * Ideally, we would also add the stack to our worktree stack map. But
274 * we have no way to figure out the worktree name here and thus can't
275 * do it efficiently.
276 */
277 if (is_worktree) {
278 strbuf_reset(&path);
279 strbuf_addf(&path, "%s/reftable", gitdir);
280
281 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285 }
286
287 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
288
289 done:
290 assert(refs->err != REFTABLE_API_ERROR);
291 strbuf_release(&path);
292 return &refs->base;
293 }
294
295 static int reftable_be_init_db(struct ref_store *ref_store,
296 int flags UNUSED,
297 struct strbuf *err UNUSED)
298 {
299 struct reftable_ref_store *refs =
300 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
301 struct strbuf sb = STRBUF_INIT;
302
303 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
304 safe_create_dir(sb.buf, 1);
305 strbuf_reset(&sb);
306
307 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
308 write_file(sb.buf, "ref: refs/heads/.invalid");
309 adjust_shared_perm(sb.buf);
310 strbuf_reset(&sb);
311
312 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
313 safe_create_dir(sb.buf, 1);
314 strbuf_reset(&sb);
315
316 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
317 write_file(sb.buf, "this repository uses the reftable format");
318 adjust_shared_perm(sb.buf);
319
320 strbuf_release(&sb);
321 return 0;
322 }
323
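/*
 * Iterator over the references of a single reftable stack, optionally
 * restricted to a given prefix.
 */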
324 struct reftable_ref_iterator {
325 struct ref_iterator base;
326 struct reftable_ref_store *refs;
327 struct reftable_iterator iter;
328 struct reftable_ref_record ref;
329 struct object_id oid;
330
331 const char *prefix;
332 size_t prefix_len;
333 unsigned int flags;
334 int err;
335 };
336
337 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
338 {
339 struct reftable_ref_iterator *iter =
340 (struct reftable_ref_iterator *)ref_iterator;
341 struct reftable_ref_store *refs = iter->refs;
342
343 while (!iter->err) {
344 int flags = 0;
345
346 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
347 if (iter->err)
348 break;
349
350 /*
351 * The files backend only lists references contained in "refs/" unless
352 * the root refs are to be included. We emulate the same behaviour here.
353 */
354 if (!starts_with(iter->ref.refname, "refs/") &&
355 !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
356 (is_pseudoref(&iter->refs->base, iter->ref.refname) ||
357 is_headref(&iter->refs->base, iter->ref.refname)))) {
358 continue;
359 }
360
361 if (iter->prefix_len &&
362 strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
363 iter->err = 1;
364 break;
365 }
366
367 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
368 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
369 REF_WORKTREE_CURRENT)
370 continue;
371
372 switch (iter->ref.value_type) {
373 case REFTABLE_REF_VAL1:
374 oidread(&iter->oid, iter->ref.value.val1);
375 break;
376 case REFTABLE_REF_VAL2:
377 oidread(&iter->oid, iter->ref.value.val2.value);
378 break;
379 case REFTABLE_REF_SYMREF:
380 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
381 RESOLVE_REF_READING, &iter->oid, &flags))
382 oidclr(&iter->oid);
383 break;
384 default:
385 BUG("unhandled reference value type %d", iter->ref.value_type);
386 }
387
388 if (is_null_oid(&iter->oid))
389 flags |= REF_ISBROKEN;
390
391 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
392 if (!refname_is_safe(iter->ref.refname))
393 die(_("refname is dangerous: %s"), iter->ref.refname);
394 oidclr(&iter->oid);
395 flags |= REF_BAD_NAME | REF_ISBROKEN;
396 }
397
398 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
399 flags & REF_ISSYMREF &&
400 flags & REF_ISBROKEN)
401 continue;
402
403 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
404 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
405 &iter->oid, flags))
406 continue;
407
408 iter->base.refname = iter->ref.refname;
409 iter->base.oid = &iter->oid;
410 iter->base.flags = flags;
411
412 break;
413 }
414
415 if (iter->err > 0) {
416 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
417 return ITER_ERROR;
418 return ITER_DONE;
419 }
420
421 if (iter->err < 0) {
422 ref_iterator_abort(ref_iterator);
423 return ITER_ERROR;
424 }
425
426 return ITER_OK;
427 }
428
429 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
430 struct object_id *peeled)
431 {
432 struct reftable_ref_iterator *iter =
433 (struct reftable_ref_iterator *)ref_iterator;
434
435 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
436 oidread(peeled, iter->ref.value.val2.target_value);
437 return 0;
438 }
439
440 return -1;
441 }
442
443 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
444 {
445 struct reftable_ref_iterator *iter =
446 (struct reftable_ref_iterator *)ref_iterator;
447 reftable_ref_record_release(&iter->ref);
448 reftable_iterator_destroy(&iter->iter);
449 free(iter);
450 return ITER_DONE;
451 }
452
453 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
454 .advance = reftable_ref_iterator_advance,
455 .peel = reftable_ref_iterator_peel,
456 .abort = reftable_ref_iterator_abort
457 };
458
459 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
460 struct reftable_stack *stack,
461 const char *prefix,
462 int flags)
463 {
464 struct reftable_merged_table *merged_table;
465 struct reftable_ref_iterator *iter;
466 int ret;
467
468 iter = xcalloc(1, sizeof(*iter));
469 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
470 iter->prefix = prefix;
471 iter->prefix_len = prefix ? strlen(prefix) : 0;
472 iter->base.oid = &iter->oid;
473 iter->flags = flags;
474 iter->refs = refs;
475
476 ret = refs->err;
477 if (ret)
478 goto done;
479
480 ret = reftable_stack_reload(stack);
481 if (ret)
482 goto done;
483
484 merged_table = reftable_stack_merged_table(stack);
485
486 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
487 if (ret)
488 goto done;
489
490 done:
491 iter->err = ret;
492 return iter;
493 }
494
495 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
496 const char *prefix,
497 const char **exclude_patterns,
498 unsigned int flags)
499 {
500 struct reftable_ref_iterator *main_iter, *worktree_iter;
501 struct reftable_ref_store *refs;
502 unsigned int required_flags = REF_STORE_READ;
503
504 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
505 required_flags |= REF_STORE_ODB;
506 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
507
508 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
509
510 /*
511 * The worktree stack is only set when we're in an actual worktree
 512  * right now. If we aren't, then we only return the common reftable
 513  * iterator.
514 */
515 if (!refs->worktree_stack)
516 return &main_iter->base;
517
518 /*
519 * Otherwise we merge both the common and the per-worktree refs into a
520 * single iterator.
521 */
522 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
523 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
524 ref_iterator_select, NULL);
525 }
526
527 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
528 const char *refname,
529 struct object_id *oid,
530 struct strbuf *referent,
531 unsigned int *type,
532 int *failure_errno)
533 {
534 struct reftable_ref_store *refs =
535 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
536 struct reftable_stack *stack = stack_for(refs, refname, &refname);
537 int ret;
538
539 if (refs->err < 0)
540 return refs->err;
541
542 ret = reftable_stack_reload(stack);
543 if (ret)
544 return ret;
545
546 ret = read_ref_without_reload(stack, refname, oid, referent, type);
547 if (ret < 0)
548 return ret;
549 if (ret > 0) {
550 *failure_errno = ENOENT;
551 return -1;
552 }
553
554 return 0;
555 }
556
557 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
558 const char *refname,
559 struct strbuf *referent)
560 {
561 struct reftable_ref_store *refs =
562 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
563 struct reftable_stack *stack = stack_for(refs, refname, &refname);
564 struct reftable_ref_record ref = {0};
565 int ret;
566
567 ret = reftable_stack_reload(stack);
568 if (ret)
569 return ret;
570
571 ret = reftable_stack_read_ref(stack, refname, &ref);
572 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
573 strbuf_addstr(referent, ref.value.symref);
574 else
575 ret = -1;
576
577 reftable_ref_record_release(&ref);
578 return ret;
579 }
580
581 /*
582 * Return the refname under which update was originally requested.
583 */
584 static const char *original_update_refname(struct ref_update *update)
585 {
586 while (update->parent_update)
587 update = update->parent_update;
588 return update->refname;
589 }
590
591 struct reftable_transaction_update {
592 struct ref_update *update;
593 struct object_id current_oid;
594 };
595
596 struct write_transaction_table_arg {
597 struct reftable_ref_store *refs;
598 struct reftable_stack *stack;
599 struct reftable_addition *addition;
600 struct reftable_transaction_update *updates;
601 size_t updates_nr;
602 size_t updates_alloc;
603 size_t updates_expected;
604 };
605
606 struct reftable_transaction_data {
607 struct write_transaction_table_arg *args;
608 size_t args_nr, args_alloc;
609 };
610
611 static void free_transaction_data(struct reftable_transaction_data *tx_data)
612 {
613 if (!tx_data)
614 return;
615 for (size_t i = 0; i < tx_data->args_nr; i++) {
616 reftable_addition_destroy(tx_data->args[i].addition);
617 free(tx_data->args[i].updates);
618 }
619 free(tx_data->args);
620 free(tx_data);
621 }
622
623 /*
624 * Prepare transaction update for the given reference update. This will cause
625 * us to lock the corresponding reftable stack for concurrent modification.
626 */
627 static int prepare_transaction_update(struct write_transaction_table_arg **out,
628 struct reftable_ref_store *refs,
629 struct reftable_transaction_data *tx_data,
630 struct ref_update *update,
631 struct strbuf *err)
632 {
633 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
634 struct write_transaction_table_arg *arg = NULL;
635 size_t i;
636 int ret;
637
638 /*
639 * Search for a preexisting stack update. If there is one then we add
640 * the update to it, otherwise we set up a new stack update.
641 */
642 for (i = 0; !arg && i < tx_data->args_nr; i++)
643 if (tx_data->args[i].stack == stack)
644 arg = &tx_data->args[i];
645
646 if (!arg) {
647 struct reftable_addition *addition;
648
649 ret = reftable_stack_reload(stack);
650 if (ret)
651 return ret;
652
653 ret = reftable_stack_new_addition(&addition, stack);
654 if (ret) {
655 if (ret == REFTABLE_LOCK_ERROR)
656 strbuf_addstr(err, "cannot lock references");
657 return ret;
658 }
659
660 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
661 tx_data->args_alloc);
662 arg = &tx_data->args[tx_data->args_nr++];
663 arg->refs = refs;
664 arg->stack = stack;
665 arg->addition = addition;
666 arg->updates = NULL;
667 arg->updates_nr = 0;
668 arg->updates_alloc = 0;
669 arg->updates_expected = 0;
670 }
671
672 arg->updates_expected++;
673
674 if (out)
675 *out = arg;
676
677 return 0;
678 }
679
680 /*
681 * Queue a reference update for the correct stack. We potentially need to
682 * handle multiple stack updates in a single transaction when it spans across
683 * multiple worktrees.
684 */
685 static int queue_transaction_update(struct reftable_ref_store *refs,
686 struct reftable_transaction_data *tx_data,
687 struct ref_update *update,
688 struct object_id *current_oid,
689 struct strbuf *err)
690 {
691 struct write_transaction_table_arg *arg = NULL;
692 int ret;
693
694 if (update->backend_data)
695 BUG("reference update queued more than once");
696
697 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
698 if (ret < 0)
699 return ret;
700
701 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
702 arg->updates_alloc);
703 arg->updates[arg->updates_nr].update = update;
704 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
705 update->backend_data = &arg->updates[arg->updates_nr++];
706
707 return 0;
708 }
709
710 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
711 struct ref_transaction *transaction,
712 struct strbuf *err)
713 {
714 struct reftable_ref_store *refs =
715 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
716 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
717 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
718 struct reftable_transaction_data *tx_data = NULL;
719 struct object_id head_oid;
720 unsigned int head_type = 0;
721 size_t i;
722 int ret;
723
724 ret = refs->err;
725 if (ret < 0)
726 goto done;
727
728 tx_data = xcalloc(1, sizeof(*tx_data));
729
730 /*
 731  * Preprocess all updates. First, we check that there are no duplicate
 732  * reference updates in this transaction. Second, we lock all stacks
733 * that will be modified during the transaction.
734 */
735 for (i = 0; i < transaction->nr; i++) {
736 ret = prepare_transaction_update(NULL, refs, tx_data,
737 transaction->updates[i], err);
738 if (ret)
739 goto done;
740
741 string_list_append(&affected_refnames,
742 transaction->updates[i]->refname);
743 }
744
745 /*
746 * Now that we have counted updates per stack we can preallocate their
747 * arrays. This avoids having to reallocate many times.
748 */
749 for (i = 0; i < tx_data->args_nr; i++) {
750 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
751 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
752 }
753
754 /*
755 * Fail if a refname appears more than once in the transaction.
756 * This code is taken from the files backend and is a good candidate to
757 * be moved into the generic layer.
758 */
759 string_list_sort(&affected_refnames);
760 if (ref_update_reject_duplicates(&affected_refnames, err)) {
761 ret = TRANSACTION_GENERIC_ERROR;
762 goto done;
763 }
764
765 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
766 &head_referent, &head_type);
767 if (ret < 0)
768 goto done;
769 ret = 0;
770
771 for (i = 0; i < transaction->nr; i++) {
772 struct ref_update *u = transaction->updates[i];
773 struct object_id current_oid = {0};
774 struct reftable_stack *stack;
775 const char *rewritten_ref;
776
777 stack = stack_for(refs, u->refname, &rewritten_ref);
778
779 /* Verify that the new object ID is valid. */
780 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
781 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
782 !(u->flags & REF_LOG_ONLY)) {
783 struct object *o = parse_object(refs->base.repo, &u->new_oid);
784 if (!o) {
785 strbuf_addf(err,
786 _("trying to write ref '%s' with nonexistent object %s"),
787 u->refname, oid_to_hex(&u->new_oid));
788 ret = -1;
789 goto done;
790 }
791
792 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
793 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
794 oid_to_hex(&u->new_oid), u->refname);
795 ret = -1;
796 goto done;
797 }
798 }
799
800 /*
801 * When we update the reference that HEAD points to we enqueue
802 * a second log-only update for HEAD so that its reflog is
803 * updated accordingly.
804 */
805 if (head_type == REF_ISSYMREF &&
806 !(u->flags & REF_LOG_ONLY) &&
807 !(u->flags & REF_UPDATE_VIA_HEAD) &&
808 !strcmp(rewritten_ref, head_referent.buf)) {
809 struct ref_update *new_update;
810
811 /*
812 * First make sure that HEAD is not already in the
813 * transaction. This check is O(lg N) in the transaction
814 * size, but it happens at most once per transaction.
815 */
816 if (string_list_has_string(&affected_refnames, "HEAD")) {
817 /* An entry already existed */
818 strbuf_addf(err,
819 _("multiple updates for 'HEAD' (including one "
820 "via its referent '%s') are not allowed"),
821 u->refname);
822 ret = TRANSACTION_NAME_CONFLICT;
823 goto done;
824 }
825
826 new_update = ref_transaction_add_update(
827 transaction, "HEAD",
828 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
829 &u->new_oid, &u->old_oid, u->msg);
830 string_list_insert(&affected_refnames, new_update->refname);
831 }
832
833 ret = read_ref_without_reload(stack, rewritten_ref,
834 &current_oid, &referent, &u->type);
835 if (ret < 0)
836 goto done;
837 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
838 /*
839 * The reference does not exist, and we either have no
840 * old object ID or expect the reference to not exist.
841 * We can thus skip below safety checks as well as the
842 * symref splitting. But we do want to verify that
843 * there is no conflicting reference here so that we
844 * can output a proper error message instead of failing
845 * at a later point.
846 */
847 ret = refs_verify_refname_available(ref_store, u->refname,
848 &affected_refnames, NULL, err);
849 if (ret < 0)
850 goto done;
851
852 /*
853 * There is no need to write the reference deletion
854 * when the reference in question doesn't exist.
855 */
856 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
857 ret = queue_transaction_update(refs, tx_data, u,
858 &current_oid, err);
859 if (ret)
860 goto done;
861 }
862
863 continue;
864 }
865 if (ret > 0) {
866 /* The reference does not exist, but we expected it to. */
867 strbuf_addf(err, _("cannot lock ref '%s': "
868 "unable to resolve reference '%s'"),
869 original_update_refname(u), u->refname);
870 ret = -1;
871 goto done;
872 }
873
874 if (u->type & REF_ISSYMREF) {
875 /*
876 * The reftable stack is locked at this point already,
877 * so it is safe to call `refs_resolve_ref_unsafe()`
878 * here without causing races.
879 */
880 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
881 &current_oid, NULL);
882
883 if (u->flags & REF_NO_DEREF) {
884 if (u->flags & REF_HAVE_OLD && !resolved) {
885 strbuf_addf(err, _("cannot lock ref '%s': "
886 "error reading reference"), u->refname);
887 ret = -1;
888 goto done;
889 }
890 } else {
891 struct ref_update *new_update;
892 int new_flags;
893
894 new_flags = u->flags;
895 if (!strcmp(rewritten_ref, "HEAD"))
896 new_flags |= REF_UPDATE_VIA_HEAD;
897
898 /*
899 * If we are updating a symref (eg. HEAD), we should also
900 * update the branch that the symref points to.
901 *
902 * This is generic functionality, and would be better
903 * done in refs.c, but the current implementation is
904 * intertwined with the locking in files-backend.c.
905 */
906 new_update = ref_transaction_add_update(
907 transaction, referent.buf, new_flags,
908 &u->new_oid, &u->old_oid, u->msg);
909 new_update->parent_update = u;
910
911 /*
912 * Change the symbolic ref update to log only. Also, it
913 * doesn't need to check its old OID value, as that will be
914 * done when new_update is processed.
915 */
916 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
917 u->flags &= ~REF_HAVE_OLD;
918
919 if (string_list_has_string(&affected_refnames, new_update->refname)) {
920 strbuf_addf(err,
921 _("multiple updates for '%s' (including one "
922 "via symref '%s') are not allowed"),
923 referent.buf, u->refname);
924 ret = TRANSACTION_NAME_CONFLICT;
925 goto done;
926 }
927 string_list_insert(&affected_refnames, new_update->refname);
928 }
929 }
930
931 /*
932 * Verify that the old object matches our expectations. Note
933 * that the error messages here do not make a lot of sense in
934 * the context of the reftable backend as we never lock
935 * individual refs. But the error messages match what the files
936 * backend returns, which keeps our tests happy.
937 */
938 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
939 if (is_null_oid(&u->old_oid))
940 strbuf_addf(err, _("cannot lock ref '%s': "
941 "reference already exists"),
942 original_update_refname(u));
943 else if (is_null_oid(&current_oid))
944 strbuf_addf(err, _("cannot lock ref '%s': "
945 "reference is missing but expected %s"),
946 original_update_refname(u),
947 oid_to_hex(&u->old_oid));
948 else
949 strbuf_addf(err, _("cannot lock ref '%s': "
950 "is at %s but expected %s"),
951 original_update_refname(u),
952 oid_to_hex(&current_oid),
953 oid_to_hex(&u->old_oid));
954 ret = -1;
955 goto done;
956 }
957
958 /*
959 * If all of the following conditions are true:
960 *
961 * - We're not about to write a symref.
962 * - We're not about to write a log-only entry.
 963  *   - Old and new object ID are the same.
964 *
965 * Then we're essentially doing a no-op update that can be
966 * skipped. This is not only for the sake of efficiency, but
967 * also skips writing unneeded reflog entries.
968 */
969 if ((u->type & REF_ISSYMREF) ||
970 (u->flags & REF_LOG_ONLY) ||
971 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
972 ret = queue_transaction_update(refs, tx_data, u,
973 &current_oid, err);
974 if (ret)
975 goto done;
976 }
977 }
978
979 transaction->backend_data = tx_data;
980 transaction->state = REF_TRANSACTION_PREPARED;
981
982 done:
983 assert(ret != REFTABLE_API_ERROR);
984 if (ret < 0) {
985 free_transaction_data(tx_data);
986 transaction->state = REF_TRANSACTION_CLOSED;
987 if (!err->len)
988 strbuf_addf(err, _("reftable: transaction prepare: %s"),
989 reftable_error_str(ret));
990 }
991 string_list_clear(&affected_refnames, 0);
992 strbuf_release(&referent);
993 strbuf_release(&head_referent);
994
995 return ret;
996 }
997
998 static int reftable_be_transaction_abort(struct ref_store *ref_store,
999 struct ref_transaction *transaction,
1000 struct strbuf *err)
1001 {
1002 struct reftable_transaction_data *tx_data = transaction->backend_data;
1003 free_transaction_data(tx_data);
1004 transaction->state = REF_TRANSACTION_CLOSED;
1005 return 0;
1006 }
1007
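/*
 * Sort queued updates by refname so that ref records are added to the new
 * table in the ascending order required by the reftable writer.
 */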
1008 static int transaction_update_cmp(const void *a, const void *b)
1009 {
1010 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1011 ((struct reftable_transaction_update *)b)->update->refname);
1012 }
1013
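/*
 * Write all queued updates of a single stack into a new table. This is the
 * callback passed to `reftable_addition_add()`, invoked once per locked
 * stack, and it writes both the ref records and any corresponding log
 * records.
 */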
1014 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1015 {
1016 struct write_transaction_table_arg *arg = cb_data;
1017 struct reftable_merged_table *mt =
1018 reftable_stack_merged_table(arg->stack);
1019 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1020 struct reftable_log_record *logs = NULL;
1021 size_t logs_nr = 0, logs_alloc = 0, i;
1022 int ret = 0;
1023
1024 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1025
1026 reftable_writer_set_limits(writer, ts, ts);
1027
1028 for (i = 0; i < arg->updates_nr; i++) {
1029 struct reftable_transaction_update *tx_update = &arg->updates[i];
1030 struct ref_update *u = tx_update->update;
1031
1032 /*
1033 * Write a reflog entry when updating a ref to point to
1034 * something new in either of the following cases:
1035 *
1036 * - The reference is about to be deleted. We always want to
1037 * delete the reflog in that case.
1038 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1039 * the reflog entry.
1040 * - `core.logAllRefUpdates` tells us to create the reflog for
1041 * the given ref.
1042 */
1043 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1044 struct reftable_log_record log = {0};
1045 struct reftable_iterator it = {0};
1046
1047 /*
1048 * When deleting refs we also delete all reflog entries
1049 * with them. While it is not strictly required to
1050 * delete reflogs together with their refs, this
1051 * matches the behaviour of the files backend.
1052 *
1053 * Unfortunately, we have no better way than to delete
1054 * all reflog entries one by one.
1055 */
1056 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1057 while (ret == 0) {
1058 struct reftable_log_record *tombstone;
1059
1060 ret = reftable_iterator_next_log(&it, &log);
1061 if (ret < 0)
1062 break;
1063 if (ret > 0 || strcmp(log.refname, u->refname)) {
1064 ret = 0;
1065 break;
1066 }
1067
1068 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1069 tombstone = &logs[logs_nr++];
1070 tombstone->refname = xstrdup(u->refname);
1071 tombstone->value_type = REFTABLE_LOG_DELETION;
1072 tombstone->update_index = log.update_index;
1073 }
1074
1075 reftable_log_record_release(&log);
1076 reftable_iterator_destroy(&it);
1077
1078 if (ret)
1079 goto done;
1080 } else if (u->flags & REF_HAVE_NEW &&
1081 (u->flags & REF_FORCE_CREATE_REFLOG ||
1082 should_write_log(&arg->refs->base, u->refname))) {
1083 struct reftable_log_record *log;
1084
1085 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1086 log = &logs[logs_nr++];
1087 memset(log, 0, sizeof(*log));
1088
1089 fill_reftable_log_record(log);
1090 log->update_index = ts;
1091 log->refname = xstrdup(u->refname);
1092 memcpy(log->value.update.new_hash, u->new_oid.hash, GIT_MAX_RAWSZ);
1093 memcpy(log->value.update.old_hash, tx_update->current_oid.hash, GIT_MAX_RAWSZ);
1094 log->value.update.message =
1095 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1096 }
1097
1098 if (u->flags & REF_LOG_ONLY)
1099 continue;
1100
1101 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1102 struct reftable_ref_record ref = {
1103 .refname = (char *)u->refname,
1104 .update_index = ts,
1105 .value_type = REFTABLE_REF_DELETION,
1106 };
1107
1108 ret = reftable_writer_add_ref(writer, &ref);
1109 if (ret < 0)
1110 goto done;
1111 } else if (u->flags & REF_HAVE_NEW) {
1112 struct reftable_ref_record ref = {0};
1113 struct object_id peeled;
1114 int peel_error;
1115
1116 ref.refname = (char *)u->refname;
1117 ref.update_index = ts;
1118
1119 peel_error = peel_object(&u->new_oid, &peeled);
1120 if (!peel_error) {
1121 ref.value_type = REFTABLE_REF_VAL2;
1122 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1123 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1124 } else if (!is_null_oid(&u->new_oid)) {
1125 ref.value_type = REFTABLE_REF_VAL1;
1126 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1127 }
1128
1129 ret = reftable_writer_add_ref(writer, &ref);
1130 if (ret < 0)
1131 goto done;
1132 }
1133 }
1134
1135 /*
1136 * Logs are written at the end so that we do not have intermixed ref
1137 * and log blocks.
1138 */
1139 if (logs) {
1140 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1141 if (ret < 0)
1142 goto done;
1143 }
1144
1145 done:
1146 assert(ret != REFTABLE_API_ERROR);
1147 for (i = 0; i < logs_nr; i++)
1148 reftable_log_record_release(&logs[i]);
1149 free(logs);
1150 return ret;
1151 }
1152
1153 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1154 struct ref_transaction *transaction,
1155 struct strbuf *err)
1156 {
1157 struct reftable_transaction_data *tx_data = transaction->backend_data;
1158 int ret = 0;
1159
1160 for (size_t i = 0; i < tx_data->args_nr; i++) {
1161 ret = reftable_addition_add(tx_data->args[i].addition,
1162 write_transaction_table, &tx_data->args[i]);
1163 if (ret < 0)
1164 goto done;
1165
1166 ret = reftable_addition_commit(tx_data->args[i].addition);
1167 if (ret < 0)
1168 goto done;
1169 }
1170
1171 done:
1172 assert(ret != REFTABLE_API_ERROR);
1173 free_transaction_data(tx_data);
1174 transaction->state = REF_TRANSACTION_CLOSED;
1175
1176 if (ret) {
1177 strbuf_addf(err, _("reftable: transaction failure: %s"),
1178 reftable_error_str(ret));
1179 return -1;
1180 }
1181 return ret;
1182 }
1183
1184 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1185 struct ref_transaction *transaction,
1186 struct strbuf *err)
1187 {
1188 return ref_transaction_commit(transaction, err);
1189 }
1190
1191 static int reftable_be_pack_refs(struct ref_store *ref_store,
1192 struct pack_refs_opts *opts)
1193 {
1194 struct reftable_ref_store *refs =
1195 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1196 struct reftable_stack *stack;
1197 int ret;
1198
1199 if (refs->err)
1200 return refs->err;
1201
1202 stack = refs->worktree_stack;
1203 if (!stack)
1204 stack = refs->main_stack;
1205
1206 ret = reftable_stack_compact_all(stack, NULL);
1207 if (ret)
1208 goto out;
1209 ret = reftable_stack_clean(stack);
1210 if (ret)
1211 goto out;
1212
1213 out:
1214 return ret;
1215 }
1216
1217 struct write_create_symref_arg {
1218 struct reftable_ref_store *refs;
1219 struct reftable_stack *stack;
1220 const char *refname;
1221 const char *target;
1222 const char *logmsg;
1223 };
1224
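/*
 * Write a single symref record and, if requested, a matching reflog entry
 * into a new table. Used as callback for `reftable_stack_add()` when
 * creating a symbolic reference.
 */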
1225 static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1226 {
1227 struct write_create_symref_arg *create = cb_data;
1228 uint64_t ts = reftable_stack_next_update_index(create->stack);
1229 struct reftable_ref_record ref = {
1230 .refname = (char *)create->refname,
1231 .value_type = REFTABLE_REF_SYMREF,
1232 .value.symref = (char *)create->target,
1233 .update_index = ts,
1234 };
1235 struct reftable_log_record log = {0};
1236 struct object_id new_oid;
1237 struct object_id old_oid;
1238 int ret;
1239
1240 reftable_writer_set_limits(writer, ts, ts);
1241
1242 ret = reftable_writer_add_ref(writer, &ref);
1243 if (ret)
1244 return ret;
1245
1246 /*
1247  * Note that it is important to try to resolve the reference before we
1248 * write the log entry. This is because `should_write_log()` will munge
1249 * `core.logAllRefUpdates`, which is undesirable when we create a new
1250 * repository because it would be written into the config. As HEAD will
1251 * not resolve for new repositories this ordering will ensure that this
1252 * never happens.
1253 */
1254 if (!create->logmsg ||
1255 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1256 RESOLVE_REF_READING, &new_oid, NULL) ||
1257 !should_write_log(&create->refs->base, create->refname))
1258 return 0;
1259
1260 fill_reftable_log_record(&log);
1261 log.refname = xstrdup(create->refname);
1262 log.update_index = ts;
1263 log.value.update.message = xstrndup(create->logmsg,
1264 create->refs->write_options.block_size / 2);
1265 memcpy(log.value.update.new_hash, new_oid.hash, GIT_MAX_RAWSZ);
1266 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1267 RESOLVE_REF_READING, &old_oid, NULL))
1268 memcpy(log.value.update.old_hash, old_oid.hash, GIT_MAX_RAWSZ);
1269
1270 ret = reftable_writer_add_log(writer, &log);
1271 reftable_log_record_release(&log);
1272 return ret;
1273 }
1274
1275 static int reftable_be_create_symref(struct ref_store *ref_store,
1276 const char *refname,
1277 const char *target,
1278 const char *logmsg)
1279 {
1280 struct reftable_ref_store *refs =
1281 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1282 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1283 struct write_create_symref_arg arg = {
1284 .refs = refs,
1285 .stack = stack,
1286 .refname = refname,
1287 .target = target,
1288 .logmsg = logmsg,
1289 };
1290 int ret;
1291
1292 ret = refs->err;
1293 if (ret < 0)
1294 goto done;
1295
1296 ret = reftable_stack_reload(stack);
1297 if (ret)
1298 goto done;
1299
1300 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1301
1302 done:
1303 assert(ret != REFTABLE_API_ERROR);
1304 if (ret)
1305 error("unable to write symref for %s: %s", refname,
1306 reftable_error_str(ret));
1307 return ret;
1308 }
1309
1310 struct write_copy_arg {
1311 struct reftable_ref_store *refs;
1312 struct reftable_stack *stack;
1313 const char *oldname;
1314 const char *newname;
1315 const char *logmsg;
1316 int delete_old;
1317 };
1318
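/*
 * Copy a reference including its reflog entries. When `delete_old` is set
 * the old reference and its reflog are deleted in the same table, which
 * turns the copy into a rename.
 */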
1319 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1320 {
1321 struct write_copy_arg *arg = cb_data;
1322 uint64_t deletion_ts, creation_ts;
1323 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1324 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1325 struct reftable_log_record old_log = {0}, *logs = NULL;
1326 struct reftable_iterator it = {0};
1327 struct string_list skip = STRING_LIST_INIT_NODUP;
1328 struct strbuf errbuf = STRBUF_INIT;
1329 size_t logs_nr = 0, logs_alloc = 0, i;
1330 int ret;
1331
1332 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1333 ret = error(_("refname %s not found"), arg->oldname);
1334 goto done;
1335 }
1336 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1337 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1338 arg->oldname);
1339 goto done;
1340 }
1341
1342 /*
1343 * There's nothing to do in case the old and new name are the same, so
1344 * we exit early in that case.
1345 */
1346 if (!strcmp(arg->oldname, arg->newname)) {
1347 ret = 0;
1348 goto done;
1349 }
1350
1351 /*
1352 * Verify that the new refname is available.
1353 */
1354 string_list_insert(&skip, arg->oldname);
1355 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1356 NULL, &skip, &errbuf);
1357 if (ret < 0) {
1358 error("%s", errbuf.buf);
1359 goto done;
1360 }
1361
1362 /*
1363 * When deleting the old reference we have to use two update indices:
1364  * one to delete the old ref and its reflog, and one to create the
1365 * new ref and its reflog. They need to be staged with two separate
1366 * indices because the new reflog needs to encode both the deletion of
1367 * the old branch and the creation of the new branch, and we cannot do
1368 * two changes to a reflog in a single update.
1369 */
1370 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1371 if (arg->delete_old)
1372 creation_ts++;
1373 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1374
1375 /*
1376 * Add the new reference. If this is a rename then we also delete the
1377 * old reference.
1378 */
1379 refs[0] = old_ref;
1380 refs[0].refname = (char *)arg->newname;
1381 refs[0].update_index = creation_ts;
1382 if (arg->delete_old) {
1383 refs[1].refname = (char *)arg->oldname;
1384 refs[1].value_type = REFTABLE_REF_DELETION;
1385 refs[1].update_index = deletion_ts;
1386 }
1387 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1388 if (ret < 0)
1389 goto done;
1390
1391 /*
1392 * When deleting the old branch we need to create a reflog entry on the
1393 * new branch name that indicates that the old branch has been deleted
1394 * and then recreated. This is a tad weird, but matches what the files
1395 * backend does.
1396 */
1397 if (arg->delete_old) {
1398 struct strbuf head_referent = STRBUF_INIT;
1399 struct object_id head_oid;
1400 int append_head_reflog;
1401 unsigned head_type = 0;
1402
1403 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1404 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1405 fill_reftable_log_record(&logs[logs_nr]);
1406 logs[logs_nr].refname = (char *)arg->newname;
1407 logs[logs_nr].update_index = deletion_ts;
1408 logs[logs_nr].value.update.message =
1409 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1410 memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1411 logs_nr++;
1412
1413 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1414 if (ret < 0)
1415 goto done;
1416 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1417 strbuf_release(&head_referent);
1418
1419 /*
1420 * The files backend uses `refs_delete_ref()` to delete the old
1421 * branch name, which will append a reflog entry for HEAD in
1422 * case it points to the old branch.
1423 */
1424 if (append_head_reflog) {
1425 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1426 logs[logs_nr] = logs[logs_nr - 1];
1427 logs[logs_nr].refname = "HEAD";
1428 logs_nr++;
1429 }
1430 }
1431
1432 /*
1433 * Create the reflog entry for the newly created branch.
1434 */
1435 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1436 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1437 fill_reftable_log_record(&logs[logs_nr]);
1438 logs[logs_nr].refname = (char *)arg->newname;
1439 logs[logs_nr].update_index = creation_ts;
1440 logs[logs_nr].value.update.message =
1441 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1442 memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
1443 logs_nr++;
1444
1445 /*
1446 * In addition to writing the reflog entry for the new branch, we also
1447 * copy over all log entries from the old reflog. Last but not least,
1448 * when renaming we also have to delete all the old reflog entries.
1449 */
1450 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1451 if (ret < 0)
1452 goto done;
1453
1454 while (1) {
1455 ret = reftable_iterator_next_log(&it, &old_log);
1456 if (ret < 0)
1457 goto done;
1458 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1459 ret = 0;
1460 break;
1461 }
1462
1463 free(old_log.refname);
1464
1465 /*
1466 * Copy over the old reflog entry with the new refname.
1467 */
1468 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1469 logs[logs_nr] = old_log;
1470 logs[logs_nr].refname = (char *)arg->newname;
1471 logs_nr++;
1472
1473 /*
1474 * Delete the old reflog entry in case we are renaming.
1475 */
1476 if (arg->delete_old) {
1477 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1478 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1479 logs[logs_nr].refname = (char *)arg->oldname;
1480 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1481 logs[logs_nr].update_index = old_log.update_index;
1482 logs_nr++;
1483 }
1484
1485 /*
1486 * Transfer ownership of the log record we're iterating over to
1487 * the array of log records. Otherwise, the pointers would get
1488 * free'd or reallocated by the iterator.
1489 */
1490 memset(&old_log, 0, sizeof(old_log));
1491 }
1492
1493 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1494 if (ret < 0)
1495 goto done;
1496
1497 done:
1498 assert(ret != REFTABLE_API_ERROR);
1499 reftable_iterator_destroy(&it);
1500 string_list_clear(&skip, 0);
1501 strbuf_release(&errbuf);
1502 for (i = 0; i < logs_nr; i++) {
1503 if (!strcmp(logs[i].refname, "HEAD"))
1504 continue;
1505 logs[i].refname = NULL;
1506 reftable_log_record_release(&logs[i]);
1507 }
1508 free(logs);
1509 reftable_ref_record_release(&old_ref);
1510 reftable_log_record_release(&old_log);
1511 return ret;
1512 }
1513
1514 static int reftable_be_rename_ref(struct ref_store *ref_store,
1515 const char *oldrefname,
1516 const char *newrefname,
1517 const char *logmsg)
1518 {
1519 struct reftable_ref_store *refs =
1520 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1521 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1522 struct write_copy_arg arg = {
1523 .refs = refs,
1524 .stack = stack,
1525 .oldname = oldrefname,
1526 .newname = newrefname,
1527 .logmsg = logmsg,
1528 .delete_old = 1,
1529 };
1530 int ret;
1531
1532 ret = refs->err;
1533 if (ret < 0)
1534 goto done;
1535
1536 ret = reftable_stack_reload(stack);
1537 if (ret)
1538 goto done;
1539 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1540
1541 done:
1542 assert(ret != REFTABLE_API_ERROR);
1543 return ret;
1544 }
1545
1546 static int reftable_be_copy_ref(struct ref_store *ref_store,
1547 const char *oldrefname,
1548 const char *newrefname,
1549 const char *logmsg)
1550 {
1551 struct reftable_ref_store *refs =
1552 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1553 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1554 struct write_copy_arg arg = {
1555 .refs = refs,
1556 .stack = stack,
1557 .oldname = oldrefname,
1558 .newname = newrefname,
1559 .logmsg = logmsg,
1560 };
1561 int ret;
1562
1563 ret = refs->err;
1564 if (ret < 0)
1565 goto done;
1566
1567 ret = reftable_stack_reload(stack);
1568 if (ret)
1569 goto done;
1570 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1571
1572 done:
1573 assert(ret != REFTABLE_API_ERROR);
1574 return ret;
1575 }
1576
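/*
 * Iterator that yields each refname which has at least one reflog entry,
 * deduplicating consecutive log records for the same ref.
 */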
1577 struct reftable_reflog_iterator {
1578 struct ref_iterator base;
1579 struct reftable_ref_store *refs;
1580 struct reftable_iterator iter;
1581 struct reftable_log_record log;
1582 struct strbuf last_name;
1583 int err;
1584 };
1585
1586 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1587 {
1588 struct reftable_reflog_iterator *iter =
1589 (struct reftable_reflog_iterator *)ref_iterator;
1590
1591 while (!iter->err) {
1592 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1593 if (iter->err)
1594 break;
1595
1596 /*
1597 * We want the refnames that we have reflogs for, so we skip if
1598 * we've already produced this name. This could be faster by
1599 * seeking directly to reflog@update_index==0.
1600 */
1601 if (!strcmp(iter->log.refname, iter->last_name.buf))
1602 continue;
1603
1604 if (check_refname_format(iter->log.refname,
1605 REFNAME_ALLOW_ONELEVEL))
1606 continue;
1607
1608 strbuf_reset(&iter->last_name);
1609 strbuf_addstr(&iter->last_name, iter->log.refname);
1610 iter->base.refname = iter->log.refname;
1611
1612 break;
1613 }
1614
1615 if (iter->err > 0) {
1616 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1617 return ITER_ERROR;
1618 return ITER_DONE;
1619 }
1620
1621 if (iter->err < 0) {
1622 ref_iterator_abort(ref_iterator);
1623 return ITER_ERROR;
1624 }
1625
1626 return ITER_OK;
1627 }
1628
1629 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1630 struct object_id *peeled)
1631 {
1632 BUG("reftable reflog iterator cannot be peeled");
1633 return -1;
1634 }
1635
1636 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1637 {
1638 struct reftable_reflog_iterator *iter =
1639 (struct reftable_reflog_iterator *)ref_iterator;
1640 reftable_log_record_release(&iter->log);
1641 reftable_iterator_destroy(&iter->iter);
1642 strbuf_release(&iter->last_name);
1643 free(iter);
1644 return ITER_DONE;
1645 }
1646
1647 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1648 .advance = reftable_reflog_iterator_advance,
1649 .peel = reftable_reflog_iterator_peel,
1650 .abort = reftable_reflog_iterator_abort
1651 };
1652
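/*
 * Create a reflog iterator for a single stack. The stack is reloaded so
 * that we iterate over its most recent state.
 */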
1653 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1654 struct reftable_stack *stack)
1655 {
1656 struct reftable_merged_table *merged_table;
1657 struct reftable_reflog_iterator *iter;
1658 int ret;
1659
1660 iter = xcalloc(1, sizeof(*iter));
1661 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
1662 strbuf_init(&iter->last_name, 0);
1663 iter->refs = refs;
1664
1665 ret = refs->err;
1666 if (ret)
1667 goto done;
1668
1669 ret = reftable_stack_reload(stack);
1670 if (ret < 0)
1671 goto done;
1672
1673 merged_table = reftable_stack_merged_table(stack);
1674
1675 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1676 if (ret < 0)
1677 goto done;
1678
1679 done:
1680 iter->err = ret;
1681 return iter;
1682 }
1683
1684 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1685 {
1686 struct reftable_ref_store *refs =
1687 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1688 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1689
1690 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1691 if (!refs->worktree_stack)
1692 return &main_iter->base;
1693
1694 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1695
1696 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
1697 ref_iterator_select, NULL);
1698 }
1699
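/*
 * Translate a single reftable log record into a call of the generic
 * `each_reflog_ent_fn` callback. Reflog existence markers, that is records
 * whose old and new object IDs are both null, are skipped.
 */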
1700 static int yield_log_record(struct reftable_log_record *log,
1701 each_reflog_ent_fn fn,
1702 void *cb_data)
1703 {
1704 struct object_id old_oid, new_oid;
1705 const char *full_committer;
1706
1707 oidread(&old_oid, log->value.update.old_hash);
1708 oidread(&new_oid, log->value.update.new_hash);
1709
1710 /*
1711 * When both the old object ID and the new object ID are null
1712  * then this is the reflog existence marker. It must not be
1713  * exposed to the caller.
1714 */
1715 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1716 return 0;
1717
1718 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1719 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1720 return fn(&old_oid, &new_oid, full_committer,
1721 log->value.update.time, log->value.update.tz_offset,
1722 log->value.update.message, cb_data);
1723 }
1724
1725 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1726 const char *refname,
1727 each_reflog_ent_fn fn,
1728 void *cb_data)
1729 {
1730 struct reftable_ref_store *refs =
1731 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1732 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1733 struct reftable_merged_table *mt = NULL;
1734 struct reftable_log_record log = {0};
1735 struct reftable_iterator it = {0};
1736 int ret;
1737
1738 if (refs->err < 0)
1739 return refs->err;
1740
1741 mt = reftable_stack_merged_table(stack);
1742 ret = reftable_merged_table_seek_log(mt, &it, refname);
1743 while (!ret) {
1744 ret = reftable_iterator_next_log(&it, &log);
1745 if (ret < 0)
1746 break;
1747 if (ret > 0 || strcmp(log.refname, refname)) {
1748 ret = 0;
1749 break;
1750 }
1751
1752 ret = yield_log_record(&log, fn, cb_data);
1753 if (ret)
1754 break;
1755 }
1756
1757 reftable_log_record_release(&log);
1758 reftable_iterator_destroy(&it);
1759 return ret;
1760 }
1761
1762 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1763 const char *refname,
1764 each_reflog_ent_fn fn,
1765 void *cb_data)
1766 {
1767 struct reftable_ref_store *refs =
1768 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1769 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1770 struct reftable_merged_table *mt = NULL;
1771 struct reftable_log_record *logs = NULL;
1772 struct reftable_iterator it = {0};
1773 size_t logs_alloc = 0, logs_nr = 0, i;
1774 int ret;
1775
1776 if (refs->err < 0)
1777 return refs->err;
1778
1779 mt = reftable_stack_merged_table(stack);
1780 ret = reftable_merged_table_seek_log(mt, &it, refname);
1781 while (!ret) {
1782 struct reftable_log_record log = {0};
1783
1784 ret = reftable_iterator_next_log(&it, &log);
1785 if (ret < 0)
1786 goto done;
1787 if (ret > 0 || strcmp(log.refname, refname)) {
1788 reftable_log_record_release(&log);
1789 ret = 0;
1790 break;
1791 }
1792
1793 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1794 logs[logs_nr++] = log;
1795 }
1796
1797 for (i = logs_nr; i--;) {
1798 ret = yield_log_record(&logs[i], fn, cb_data);
1799 if (ret)
1800 goto done;
1801 }
1802
1803 done:
1804 reftable_iterator_destroy(&it);
1805 for (i = 0; i < logs_nr; i++)
1806 reftable_log_record_release(&logs[i]);
1807 free(logs);
1808 return ret;
1809 }
1810
1811 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1812 const char *refname)
1813 {
1814 struct reftable_ref_store *refs =
1815 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1816 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1817 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
1818 struct reftable_log_record log = {0};
1819 struct reftable_iterator it = {0};
1820 int ret;
1821
1822 ret = refs->err;
1823 if (ret < 0)
1824 goto done;
1825
1826 ret = reftable_stack_reload(stack);
1827 if (ret < 0)
1828 goto done;
1829
1830 ret = reftable_merged_table_seek_log(mt, &it, refname);
1831 if (ret < 0)
1832 goto done;
1833
1834 /*
1835 * Check whether we get at least one log record for the given ref name.
1836 * If so, the reflog exists, otherwise it doesn't.
1837 */
1838 ret = reftable_iterator_next_log(&it, &log);
1839 if (ret < 0)
1840 goto done;
1841 if (ret > 0) {
1842 ret = 0;
1843 goto done;
1844 }
1845
1846 ret = strcmp(log.refname, refname) == 0;
1847
1848 done:
1849 reftable_iterator_destroy(&it);
1850 reftable_log_record_release(&log);
1851 if (ret < 0)
1852 ret = 0;
1853 return ret;
1854 }
1855
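/*
 * Creating a reflog does not add any regular log entry. Instead we write
 * a single placeholder record whose only purpose is to mark the reflog
 * as existing.
 */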
1856 struct write_reflog_existence_arg {
1857 struct reftable_ref_store *refs;
1858 const char *refname;
1859 struct reftable_stack *stack;
1860 };
1861
1862 static int write_reflog_existence_table(struct reftable_writer *writer,
1863 void *cb_data)
1864 {
1865 struct write_reflog_existence_arg *arg = cb_data;
1866 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1867 struct reftable_log_record log = {0};
1868 int ret;
1869
1870 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1871 if (ret <= 0)
1872 goto done;
1873
1874 reftable_writer_set_limits(writer, ts, ts);
1875
1876 /*
1877 * The existence entry has both its old and new object IDs set to the
1878 * null object ID. Our iterators are aware of this and will not present
1879 * such entries to their callers.
1880 */
1881 log.refname = xstrdup(arg->refname);
1882 log.update_index = ts;
1883 log.value_type = REFTABLE_LOG_UPDATE;
1884 ret = reftable_writer_add_log(writer, &log);
1885
1886 done:
1887 assert(ret != REFTABLE_API_ERROR);
1888 reftable_log_record_release(&log);
1889 return ret;
1890 }
1891
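/*
 * Reload the stack and, in case the ref has no log records yet, append a
 * new table containing only the existence placeholder.
 */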
1892 static int reftable_be_create_reflog(struct ref_store *ref_store,
1893 const char *refname,
1894 struct strbuf *errmsg)
1895 {
1896 struct reftable_ref_store *refs =
1897 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1898 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1899 struct write_reflog_existence_arg arg = {
1900 .refs = refs,
1901 .stack = stack,
1902 .refname = refname,
1903 };
1904 int ret;
1905
1906 ret = refs->err;
1907 if (ret < 0)
1908 goto done;
1909
1910 ret = reftable_stack_reload(stack);
1911 if (ret)
1912 goto done;
1913
1914 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1915
1916 done:
1917 return ret;
1918 }
1919
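/*
 * Deleting a reflog is implemented by appending a tombstone record for
 * every log entry that currently exists for the ref.
 */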
1920 struct write_reflog_delete_arg {
1921 struct reftable_stack *stack;
1922 const char *refname;
1923 };
1924
1925 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1926 {
1927 struct write_reflog_delete_arg *arg = cb_data;
1928 struct reftable_merged_table *mt =
1929 reftable_stack_merged_table(arg->stack);
1930 struct reftable_log_record log = {0}, tombstone = {0};
1931 struct reftable_iterator it = {0};
1932 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1933 int ret;
1934
1935 reftable_writer_set_limits(writer, ts, ts);
1936
1937 /*
1938 * In order to delete a reflog we need to write a tombstone for each of
1939 * its entries one by one. This is inefficient, but the reftable format
1940 * does not have a better deletion marker right now.
1941 */
1942 ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
1943 while (ret == 0) {
1944 ret = reftable_iterator_next_log(&it, &log);
1945 if (ret < 0)
1946 break;
1947 if (ret > 0 || strcmp(log.refname, arg->refname)) {
1948 ret = 0;
1949 break;
1950 }
1951
1952 tombstone.refname = (char *)arg->refname;
1953 tombstone.value_type = REFTABLE_LOG_DELETION;
1954 tombstone.update_index = log.update_index;
1955
1956 ret = reftable_writer_add_log(writer, &tombstone);
1957 }
1958
1959 reftable_log_record_release(&log);
1960 reftable_iterator_destroy(&it);
1961 return ret;
1962 }
1963
1964 static int reftable_be_delete_reflog(struct ref_store *ref_store,
1965 const char *refname)
1966 {
1967 struct reftable_ref_store *refs =
1968 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
1969 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1970 struct write_reflog_delete_arg arg = {
1971 .stack = stack,
1972 .refname = refname,
1973 };
1974 int ret;
1975
1976 ret = reftable_stack_reload(stack);
1977 if (ret)
1978 return ret;
1979 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
1980
1981 assert(ret != REFTABLE_API_ERROR);
1982 return ret;
1983 }
1984
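/*
 * State shared between reftable_be_reflog_expire() and its table writer:
 * the rewritten log records (live entries and tombstones) plus an
 * optional new value for the ref itself when EXPIRE_REFLOGS_UPDATE_REF
 * is in effect.
 */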
1985 struct reflog_expiry_arg {
1986 struct reftable_stack *stack;
1987 struct reftable_log_record *records;
1988 struct object_id update_oid;
1989 const char *refname;
1990 size_t len;
1991 };
1992
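/*
 * Write the table resulting from reflog expiry: an optional ref update
 * pointing at the value of the newest surviving entry, a placeholder log
 * record in case no live entries remain, and all rewritten log records
 * including their tombstones.
 */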
1993 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
1994 {
1995 struct reflog_expiry_arg *arg = cb_data;
1996 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1997 uint64_t live_records = 0;
1998 size_t i;
1999 int ret;
2000
2001 for (i = 0; i < arg->len; i++)
2002 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2003 live_records++;
2004
2005 reftable_writer_set_limits(writer, ts, ts);
2006
2007 if (!is_null_oid(&arg->update_oid)) {
2008 struct reftable_ref_record ref = {0};
2009 struct object_id peeled;
2010
2011 ref.refname = (char *)arg->refname;
2012 ref.update_index = ts;
2013
2014 if (!peel_object(&arg->update_oid, &peeled)) {
2015 ref.value_type = REFTABLE_REF_VAL2;
2016 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2017 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2018 } else {
2019 ref.value_type = REFTABLE_REF_VAL1;
2020 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2021 }
2022
2023 ret = reftable_writer_add_ref(writer, &ref);
2024 if (ret < 0)
2025 return ret;
2026 }
2027
2028 /*
2029 * When no live entries remain the reflog would effectively be deleted.
2030 * To avoid that we write a placeholder reflog entry which indicates
2031 * that the reflog still exists.
2032 */
2033 if (!live_records) {
2034 struct reftable_log_record log = {
2035 .refname = (char *)arg->refname,
2036 .value_type = REFTABLE_LOG_UPDATE,
2037 .update_index = ts,
2038 };
2039
2040 ret = reftable_writer_add_log(writer, &log);
2041 if (ret)
2042 return ret;
2043 }
2044
2045 for (i = 0; i < arg->len; i++) {
2046 ret = reftable_writer_add_log(writer, &arg->records[i]);
2047 if (ret)
2048 return ret;
2049 }
2050
2051 return 0;
2052 }
2053
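/*
 * Expire reflog entries for a single ref: reload the stack, collect all
 * of its log records (skipping the existence placeholder), rewrite them
 * according to the pruning policy and, unless this is a dry run, commit
 * the resulting table.
 */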
2054 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2055 const char *refname,
2056 unsigned int flags,
2057 reflog_expiry_prepare_fn prepare_fn,
2058 reflog_expiry_should_prune_fn should_prune_fn,
2059 reflog_expiry_cleanup_fn cleanup_fn,
2060 void *policy_cb_data)
2061 {
2062 /*
2063 * For log expiry, we write tombstones for every single reflog entry
2064 * that is to be expired. This means that the entries are still
2065 * retrievable by delving into the stack, and expiring entries
2066 * paradoxically takes up extra space. This space is only reclaimed when
2067 * the reftable stack is compacted.
2068 *
2069 * It would be better if the refs backend supported an API that sets a
2070 * criterion for all refs, passing the criterion to pack_refs().
2071 *
2072 * On the plus side, because we do the expiration per ref, we can easily
2073 * insert the reflog existence dummies.
2074 */
2075 struct reftable_ref_store *refs =
2076 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2077 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2078 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
2079 struct reftable_log_record *logs = NULL;
2080 struct reftable_log_record *rewritten = NULL;
2081 struct reftable_ref_record ref_record = {0};
2082 struct reftable_iterator it = {0};
2083 struct reftable_addition *add = NULL;
2084 struct reflog_expiry_arg arg = {0};
2085 struct object_id oid = {0};
2086 uint8_t *last_hash = NULL;
2087 size_t logs_nr = 0, logs_alloc = 0, i;
2088 int ret;
2089
2090 if (refs->err < 0)
2091 return refs->err;
2092
2093 ret = reftable_stack_reload(stack);
2094 if (ret < 0)
2095 goto done;
2096
2097 ret = reftable_merged_table_seek_log(mt, &it, refname);
2098 if (ret < 0)
2099 goto done;
2100
2101 ret = reftable_stack_new_addition(&add, stack);
2102 if (ret < 0)
2103 goto done;
2104
2105 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2106 if (ret < 0)
2107 goto done;
2108 if (reftable_ref_record_val1(&ref_record))
2109 oidread(&oid, reftable_ref_record_val1(&ref_record));
2110 prepare_fn(refname, &oid, policy_cb_data);
2111
2112 while (1) {
2113 struct reftable_log_record log = {0};
2114 struct object_id old_oid, new_oid;
2115
2116 ret = reftable_iterator_next_log(&it, &log);
2117 if (ret < 0)
2118 goto done;
2119 if (ret > 0 || strcmp(log.refname, refname)) {
2120 reftable_log_record_release(&log);
2121 break;
2122 }
2123
2124 oidread(&old_oid, log.value.update.old_hash);
2125 oidread(&new_oid, log.value.update.new_hash);
2126
2127 /*
2128 * Skip over the reflog existence marker. We will add it back
2129 * in when there are no live reflog records.
2130 */
2131 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2132 reftable_log_record_release(&log);
2133 continue;
2134 }
2135
2136 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2137 logs[logs_nr++] = log;
2138 }
2139
2140 /*
2141 * We need to rewrite all reflog entries according to the pruning
2142 * callback function:
2143 *
2144 * - If a reflog entry shall be pruned we mark the record for
2145 * deletion.
2146 *
2147 * - Otherwise we may have to rewrite the chain of reflog entries so
2148 * that gaps created by just-deleted records get backfilled.
2149 */
2150 CALLOC_ARRAY(rewritten, logs_nr);
2151 for (i = logs_nr; i--;) {
2152 struct reftable_log_record *dest = &rewritten[i];
2153 struct object_id old_oid, new_oid;
2154
2155 *dest = logs[i];
2156 oidread(&old_oid, logs[i].value.update.old_hash);
2157 oidread(&new_oid, logs[i].value.update.new_hash);
2158
2159 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2160 (timestamp_t)logs[i].value.update.time,
2161 logs[i].value.update.tz_offset,
2162 logs[i].value.update.message,
2163 policy_cb_data)) {
2164 dest->value_type = REFTABLE_LOG_DELETION;
2165 } else {
2166 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2167 memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
2168 last_hash = logs[i].value.update.new_hash;
2169 }
2170 }
2171
2172 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2173 reftable_ref_record_val1(&ref_record))
2174 oidread(&arg.update_oid, last_hash);
2175
2176 arg.records = rewritten;
2177 arg.len = logs_nr;
2178 arg.stack = stack;
2179 arg.refname = refname;
2180
2181 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2182 if (ret < 0)
2183 goto done;
2184
2185 /*
2186 * Future improvement: we could skip writing records that were
2187 * not changed.
2188 */
2189 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2190 ret = reftable_addition_commit(add);
2191
2192 done:
2193 if (add)
2194 cleanup_fn(policy_cb_data);
2195 assert(ret != REFTABLE_API_ERROR);
2196
2197 reftable_ref_record_release(&ref_record);
2198 reftable_iterator_destroy(&it);
2199 reftable_addition_destroy(add);
2200 for (i = 0; i < logs_nr; i++)
2201 reftable_log_record_release(&logs[i]);
2202 free(logs);
2203 free(rewritten);
2204 return ret;
2205 }
2206
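/*
 * The virtual method table which plugs the reftable backend into Git's
 * generic ref-store infrastructure.
 */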
2207 struct ref_storage_be refs_be_reftable = {
2208 .name = "reftable",
2209 .init = reftable_be_init,
2210 .init_db = reftable_be_init_db,
2211 .transaction_prepare = reftable_be_transaction_prepare,
2212 .transaction_finish = reftable_be_transaction_finish,
2213 .transaction_abort = reftable_be_transaction_abort,
2214 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2215
2216 .pack_refs = reftable_be_pack_refs,
2217 .create_symref = reftable_be_create_symref,
2218 .rename_ref = reftable_be_rename_ref,
2219 .copy_ref = reftable_be_copy_ref,
2220
2221 .iterator_begin = reftable_be_iterator_begin,
2222 .read_raw_ref = reftable_be_read_raw_ref,
2223 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2224
2225 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2226 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2227 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2228 .reflog_exists = reftable_be_reflog_exists,
2229 .create_reflog = reftable_be_create_reflog,
2230 .delete_reflog = reftable_be_delete_reflog,
2231 .reflog_expire = reftable_be_reflog_expire,
2232 };