#include "../git-compat-util.h"
#include "../abspath.h"
#include "../chdir-notify.h"
#include "../environment.h"
#include "../gettext.h"
#include "../hash.h"
#include "../hex.h"
#include "../iterator.h"
#include "../ident.h"
#include "../lockfile.h"
#include "../object.h"
#include "../path.h"
#include "../refs.h"
#include "../reftable/reftable-stack.h"
#include "../reftable/reftable-record.h"
#include "../reftable/reftable-error.h"
#include "../reftable/reftable-iterator.h"
#include "../reftable/reftable-merged.h"
#include "../setup.h"
#include "../strmap.h"
#include "refs-internal.h"

/*
 * Used as a flag in ref_update::flags when the ref_update was via an
 * update to HEAD.
 */
#define REF_UPDATE_VIA_HEAD (1 << 8)

struct reftable_ref_store {
	struct ref_store base;

	/*
	 * The main stack refers to the common dir and thus contains common
	 * refs as well as refs of the main repository.
	 */
	struct reftable_stack *main_stack;
	/*
	 * The worktree stack refers to the gitdir in case the refdb is opened
	 * via a worktree. It thus contains the per-worktree refs.
	 */
	struct reftable_stack *worktree_stack;
	/*
	 * Map of worktree stacks by their respective worktree names. The map
	 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
	 */
	struct strmap worktree_stacks;
	struct reftable_write_options write_options;

	unsigned int store_flags;
	int err;
};

/*
 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
 * reftable_ref_store. required_flags is compared with ref_store's store_flags
 * to ensure the ref_store has all required capabilities. "caller" is used in
 * any necessary error messages.
 */
static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
						       unsigned int required_flags,
						       const char *caller)
{
	struct reftable_ref_store *refs;

	if (ref_store->be != &refs_be_reftable)
		BUG("ref_store is type \"%s\" not \"reftables\" in %s",
		    ref_store->be->name, caller);

	refs = (struct reftable_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		BUG("operation %s requires abilities 0x%x, but only have 0x%x",
		    caller, required_flags, refs->store_flags);

	return refs;
}

/*
 * Some refs are global to the repository (refs/heads/{*}), while others are
 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
 * multiple separate databases (ie. multiple reftable/ directories), one for
 * the shared refs, one for the current worktree refs, and one for each
 * additional worktree. For reading, we merge the view of both the shared and
 * the current worktree's refs, when necessary.
 *
 * This function also optionally assigns the rewritten reference name that is
 * local to the stack. This translation is required when using worktree refs
 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
 * those references in their normalized form.
 */
static struct reftable_stack *stack_for(struct reftable_ref_store *store,
					const char *refname,
					const char **rewritten_ref)
{
	const char *wtname;
	int wtname_len;

	if (!refname)
		return store->main_stack;

	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
	case REF_WORKTREE_OTHER: {
		static struct strbuf wtname_buf = STRBUF_INIT;
		struct strbuf wt_dir = STRBUF_INIT;
		struct reftable_stack *stack;

		/*
		 * We're using a static buffer here so that we don't need to
		 * allocate the worktree name whenever we look up a reference.
		 * This could be avoided if the strmap interface knew how to
		 * handle keys with a length.
		 */
		strbuf_reset(&wtname_buf);
		strbuf_add(&wtname_buf, wtname, wtname_len);

		/*
		 * There is an edge case here: when the worktree references the
		 * current worktree, then we set up the stack once via
		 * `worktree_stacks` and once via `worktree_stack`. This is
		 * wasteful, but in the reading case it shouldn't matter. And
		 * in the writing case we would notice that the stack is locked
		 * already and error out when trying to write a reference via
		 * both stacks.
		 */
		stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
		if (!stack) {
			strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
				    store->base.repo->commondir, wtname_buf.buf);

			store->err = reftable_new_stack(&stack, wt_dir.buf,
							store->write_options);
			assert(store->err != REFTABLE_API_ERROR);
			strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
		}

		strbuf_release(&wt_dir);
		return stack;
	}
	case REF_WORKTREE_CURRENT:
		/*
		 * If there is no worktree stack then we're currently in the
		 * main worktree. We thus return the main stack in that case.
		 */
		if (!store->worktree_stack)
			return store->main_stack;
		return store->worktree_stack;
	case REF_WORKTREE_MAIN:
	case REF_WORKTREE_SHARED:
		return store->main_stack;
	default:
		BUG("unhandled worktree reference type");
	}
}
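
/*
 * Editor's note, not part of the original file: given the semantics of
 * parse_worktree_ref(), stack_for() is expected to map refnames roughly as
 * follows (the worktree name "wt" is hypothetical):
 *
 *   "refs/heads/main"              -> main_stack
 *   "HEAD"                         -> worktree_stack, or main_stack when we
 *                                     are in the main worktree
 *   "worktrees/wt/refs/bisect/bad" -> the stack of worktree "wt", with
 *                                     *rewritten_ref set to "refs/bisect/bad"
 *   "main-worktree/HEAD"           -> main_stack, with *rewritten_ref set to
 *                                     "HEAD"
 */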

static int should_write_log(struct ref_store *refs, const char *refname)
{
	if (log_all_ref_updates == LOG_REFS_UNSET)
		log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;

	switch (log_all_ref_updates) {
	case LOG_REFS_NONE:
		return refs_reflog_exists(refs, refname);
	case LOG_REFS_ALWAYS:
		return 1;
	case LOG_REFS_NORMAL:
		if (should_autocreate_reflog(refname))
			return 1;
		return refs_reflog_exists(refs, refname);
	default:
		BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
	}
}
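
/*
 * Editor's sketch of the resulting behaviour: with `core.logAllRefUpdates`
 * unset, a bare repository falls back to LOG_REFS_NONE and thus only logs
 * refs that already have a reflog, while a non-bare repository falls back to
 * LOG_REFS_NORMAL and additionally auto-creates reflogs for the refs covered
 * by should_autocreate_reflog().
 */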

static void fill_reftable_log_record(struct reftable_log_record *log)
{
	const char *info = git_committer_info(0);
	struct ident_split split = {0};
	int sign = 1;

	if (split_ident_line(&split, info, strlen(info)))
		BUG("failed splitting committer info");

	reftable_log_record_release(log);
	log->value_type = REFTABLE_LOG_UPDATE;
	log->value.update.name =
		xstrndup(split.name_begin, split.name_end - split.name_begin);
	log->value.update.email =
		xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
	log->value.update.time = atol(split.date_begin);
	if (*split.tz_begin == '-') {
		sign = -1;
		split.tz_begin++;
	}
	if (*split.tz_begin == '+') {
		sign = 1;
		split.tz_begin++;
	}

	log->value.update.tz_offset = sign * atoi(split.tz_begin);
}
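
/*
 * Worked example (editor's note): for a committer timezone of "+0530" the
 * code above stores tz_offset as +530, and for "-0800" it stores -800. The
 * offset thus keeps the raw HHMM notation of the ident line rather than a
 * count of minutes.
 */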

static int read_ref_without_reload(struct reftable_stack *stack,
				   const char *refname,
				   struct object_id *oid,
				   struct strbuf *referent,
				   unsigned int *type)
{
	struct reftable_ref_record ref = {0};
	int ret;

	ret = reftable_stack_read_ref(stack, refname, &ref);
	if (ret)
		goto done;

	if (ref.value_type == REFTABLE_REF_SYMREF) {
		strbuf_reset(referent);
		strbuf_addstr(referent, ref.value.symref);
		*type |= REF_ISSYMREF;
	} else if (reftable_ref_record_val1(&ref)) {
		oidread(oid, reftable_ref_record_val1(&ref));
	} else {
		/* We got a tombstone, which should not happen. */
		BUG("unhandled reference value type %d", ref.value_type);
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_ref_record_release(&ref);
	return ret;
}
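
/*
 * Editor's summary of the return contract relied upon by the callers below:
 * 0 means the ref was found and either `oid` or `referent` was populated, a
 * positive value means the ref does not exist, and a negative value is a
 * reftable error code.
 */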

static struct ref_store *reftable_be_init(struct repository *repo,
					  const char *gitdir,
					  unsigned int store_flags)
{
	struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct strbuf path = STRBUF_INIT;
	int is_worktree;
	mode_t mask;

	mask = umask(0);
	umask(mask);

	base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
	strmap_init(&refs->worktree_stacks);
	refs->store_flags = store_flags;
	refs->write_options.block_size = 4096;
	refs->write_options.hash_id = repo->hash_algo->format_id;
	refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);

	/*
	 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
	 * This stack contains both the shared and the main worktree refs.
	 *
	 * Note that we don't try to resolve the path in case we have a
	 * worktree because `get_common_dir_noenv()` already does it for us.
	 */
	is_worktree = get_common_dir_noenv(&path, gitdir);
	if (!is_worktree) {
		strbuf_reset(&path);
		strbuf_realpath(&path, gitdir, 0);
	}
	strbuf_addstr(&path, "/reftable");
	refs->err = reftable_new_stack(&refs->main_stack, path.buf,
				       refs->write_options);
	if (refs->err)
		goto done;

	/*
	 * If we're in a worktree we also need to set up the worktree reftable
	 * stack that is contained in the per-worktree GIT_DIR.
	 *
	 * Ideally, we would also add the stack to our worktree stack map. But
	 * we have no way to figure out the worktree name here and thus can't
	 * do it efficiently.
	 */
	if (is_worktree) {
		strbuf_reset(&path);
		strbuf_addf(&path, "%s/reftable", gitdir);

		refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
					       refs->write_options);
		if (refs->err)
			goto done;
	}

	chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);

done:
	assert(refs->err != REFTABLE_API_ERROR);
	strbuf_release(&path);
	return &refs->base;
}

static int reftable_be_init_db(struct ref_store *ref_store,
			       int flags UNUSED,
			       struct strbuf *err UNUSED)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
	struct strbuf sb = STRBUF_INIT;

	strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
	safe_create_dir(sb.buf, 1);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
	write_file(sb.buf, "ref: refs/heads/.invalid");
	adjust_shared_perm(sb.buf);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
	safe_create_dir(sb.buf, 1);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
	write_file(sb.buf, "this repository uses the reftable format");
	adjust_shared_perm(sb.buf);

	strbuf_release(&sb);
	return 0;
}

struct reftable_ref_iterator {
	struct ref_iterator base;
	struct reftable_ref_store *refs;
	struct reftable_iterator iter;
	struct reftable_ref_record ref;
	struct object_id oid;

	const char *prefix;
	size_t prefix_len;
	unsigned int flags;
	int err;
};

static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;
	struct reftable_ref_store *refs = iter->refs;

	while (!iter->err) {
		int flags = 0;

		iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
		if (iter->err)
			break;

		/*
		 * The files backend only lists references contained in "refs/" unless
		 * the root refs are to be included. We emulate the same behaviour here.
		 */
		if (!starts_with(iter->ref.refname, "refs/") &&
		    !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
		      (is_pseudoref(&iter->refs->base, iter->ref.refname) ||
		       is_headref(&iter->refs->base, iter->ref.refname)))) {
			continue;
		}

		if (iter->prefix_len &&
		    strncmp(iter->prefix, iter->ref.refname, iter->prefix_len)) {
			iter->err = 1;
			break;
		}

		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
			    REF_WORKTREE_CURRENT)
			continue;

		switch (iter->ref.value_type) {
		case REFTABLE_REF_VAL1:
			oidread(&iter->oid, iter->ref.value.val1);
			break;
		case REFTABLE_REF_VAL2:
			oidread(&iter->oid, iter->ref.value.val2.value);
			break;
		case REFTABLE_REF_SYMREF:
			if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
						     RESOLVE_REF_READING, &iter->oid, &flags))
				oidclr(&iter->oid);
			break;
		default:
			BUG("unhandled reference value type %d", iter->ref.value_type);
		}

		if (is_null_oid(&iter->oid))
			flags |= REF_ISBROKEN;

		if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
			if (!refname_is_safe(iter->ref.refname))
				die(_("refname is dangerous: %s"), iter->ref.refname);
			oidclr(&iter->oid);
			flags |= REF_BAD_NAME | REF_ISBROKEN;
		}

		if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
		    flags & REF_ISSYMREF &&
		    flags & REF_ISBROKEN)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
					    &iter->oid, flags))
			continue;

		iter->base.refname = iter->ref.refname;
		iter->base.oid = &iter->oid;
		iter->base.flags = flags;

		break;
	}

	if (iter->err > 0) {
		if (ref_iterator_abort(ref_iterator) != ITER_DONE)
			return ITER_ERROR;
		return ITER_DONE;
	}

	if (iter->err < 0) {
		ref_iterator_abort(ref_iterator);
		return ITER_ERROR;
	}

	return ITER_OK;
}
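
/*
 * Editor's note on the error convention used by these iterators: a positive
 * `iter->err` means the underlying reftable iterator is exhausted (mapped to
 * ITER_DONE above), while a negative value is a reftable error code (mapped
 * to ITER_ERROR).
 */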

static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
				      struct object_id *peeled)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;

	if (iter->ref.value_type == REFTABLE_REF_VAL2) {
		oidread(peeled, iter->ref.value.val2.target_value);
		return 0;
	}

	return -1;
}

static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;
	reftable_ref_record_release(&iter->ref);
	reftable_iterator_destroy(&iter->iter);
	free(iter);
	return ITER_DONE;
}

static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
	.advance = reftable_ref_iterator_advance,
	.peel = reftable_ref_iterator_peel,
	.abort = reftable_ref_iterator_abort
};

static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
							    struct reftable_stack *stack,
							    const char *prefix,
							    int flags)
{
	struct reftable_merged_table *merged_table;
	struct reftable_ref_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
	iter->prefix = prefix;
	iter->prefix_len = prefix ? strlen(prefix) : 0;
	iter->base.oid = &iter->oid;
	iter->flags = flags;
	iter->refs = refs;

	ret = refs->err;
	if (ret)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	merged_table = reftable_stack_merged_table(stack);

	ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
	if (ret)
		goto done;

done:
	iter->err = ret;
	return iter;
}

static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
						       const char *prefix,
						       const char **exclude_patterns,
						       unsigned int flags)
{
	struct reftable_ref_iterator *main_iter, *worktree_iter;
	struct reftable_ref_store *refs;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");

	main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);

	/*
	 * The worktree stack is only set when we're in an actual worktree
	 * right now. If we aren't, then we return the common reftable
	 * iterator, only.
	 */
	if (!refs->worktree_stack)
		return &main_iter->base;

	/*
	 * Otherwise we merge both the common and the per-worktree refs into a
	 * single iterator.
	 */
	worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}

static int reftable_be_read_raw_ref(struct ref_store *ref_store,
				    const char *refname,
				    struct object_id *oid,
				    struct strbuf *referent,
				    unsigned int *type,
				    int *failure_errno)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	int ret;

	if (refs->err < 0)
		return refs->err;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;

	ret = read_ref_without_reload(stack, refname, oid, referent, type);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		*failure_errno = ENOENT;
		return -1;
	}

	return 0;
}

static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
					 const char *refname,
					 struct strbuf *referent)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_ref_record ref = {0};
	int ret;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;

	ret = reftable_stack_read_ref(stack, refname, &ref);
	if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
		strbuf_addstr(referent, ref.value.symref);
	else
		ret = -1;

	reftable_ref_record_release(&ref);
	return ret;
}

/*
 * Return the refname under which update was originally requested.
 */
static const char *original_update_refname(struct ref_update *update)
{
	while (update->parent_update)
		update = update->parent_update;
	return update->refname;
}

struct reftable_transaction_update {
	struct ref_update *update;
	struct object_id current_oid;
};

struct write_transaction_table_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct reftable_addition *addition;
	struct reftable_transaction_update *updates;
	size_t updates_nr;
	size_t updates_alloc;
	size_t updates_expected;
};

struct reftable_transaction_data {
	struct write_transaction_table_arg *args;
	size_t args_nr, args_alloc;
};

static void free_transaction_data(struct reftable_transaction_data *tx_data)
{
	if (!tx_data)
		return;
	for (size_t i = 0; i < tx_data->args_nr; i++) {
		reftable_addition_destroy(tx_data->args[i].addition);
		free(tx_data->args[i].updates);
	}
	free(tx_data->args);
	free(tx_data);
}

/*
 * Prepare transaction update for the given reference update. This will cause
 * us to lock the corresponding reftable stack for concurrent modification.
 */
static int prepare_transaction_update(struct write_transaction_table_arg **out,
				      struct reftable_ref_store *refs,
				      struct reftable_transaction_data *tx_data,
				      struct ref_update *update,
				      struct strbuf *err)
{
	struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
	struct write_transaction_table_arg *arg = NULL;
	size_t i;
	int ret;

	/*
	 * Search for a preexisting stack update. If there is one then we add
	 * the update to it, otherwise we set up a new stack update.
	 */
	for (i = 0; !arg && i < tx_data->args_nr; i++)
		if (tx_data->args[i].stack == stack)
			arg = &tx_data->args[i];

	if (!arg) {
		struct reftable_addition *addition;

		ret = reftable_stack_reload(stack);
		if (ret)
			return ret;

		ret = reftable_stack_new_addition(&addition, stack);
		if (ret) {
			if (ret == REFTABLE_LOCK_ERROR)
				strbuf_addstr(err, "cannot lock references");
			return ret;
		}

		ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
			   tx_data->args_alloc);
		arg = &tx_data->args[tx_data->args_nr++];
		arg->refs = refs;
		arg->stack = stack;
		arg->addition = addition;
		arg->updates = NULL;
		arg->updates_nr = 0;
		arg->updates_alloc = 0;
		arg->updates_expected = 0;
	}

	arg->updates_expected++;

	if (out)
		*out = arg;

	return 0;
}

/*
 * Queue a reference update for the correct stack. We potentially need to
 * handle multiple stack updates in a single transaction when it spans across
 * multiple worktrees.
 */
static int queue_transaction_update(struct reftable_ref_store *refs,
				    struct reftable_transaction_data *tx_data,
				    struct ref_update *update,
				    struct object_id *current_oid,
				    struct strbuf *err)
{
	struct write_transaction_table_arg *arg = NULL;
	int ret;

	if (update->backend_data)
		BUG("reference update queued more than once");

	ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
	if (ret < 0)
		return ret;

	ALLOC_GROW(arg->updates, arg->updates_nr + 1,
		   arg->updates_alloc);
	arg->updates[arg->updates_nr].update = update;
	oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
	update->backend_data = &arg->updates[arg->updates_nr++];

	return 0;
}
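
/*
 * Editor's sketch of the resulting layout: a transaction touching both
 * "refs/heads/main" and a hypothetical per-worktree ref such as
 * "refs/bisect/bad" ends up with two `write_transaction_table_arg` entries
 * in `tx_data`, one per affected stack, each holding only the updates
 * destined for that stack.
 */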

static int reftable_be_transaction_prepare(struct ref_store *ref_store,
					   struct ref_transaction *transaction,
					   struct strbuf *err)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
	struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
	struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
	struct reftable_transaction_data *tx_data = NULL;
	struct object_id head_oid;
	unsigned int head_type = 0;
	size_t i;
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	tx_data = xcalloc(1, sizeof(*tx_data));

	/*
	 * Preprocess all updates. For one we check that there are no duplicate
	 * reference updates in this transaction. Second, we lock all stacks
	 * that will be modified during the transaction.
	 */
	for (i = 0; i < transaction->nr; i++) {
		ret = prepare_transaction_update(NULL, refs, tx_data,
						 transaction->updates[i], err);
		if (ret)
			goto done;

		string_list_append(&affected_refnames,
				   transaction->updates[i]->refname);
	}

	/*
	 * Now that we have counted updates per stack we can preallocate their
	 * arrays. This avoids having to reallocate many times.
	 */
	for (i = 0; i < tx_data->args_nr; i++) {
		CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
		tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
	}

	/*
	 * Fail if a refname appears more than once in the transaction.
	 * This code is taken from the files backend and is a good candidate to
	 * be moved into the generic layer.
	 */
	string_list_sort(&affected_refnames);
	if (ref_update_reject_duplicates(&affected_refnames, err)) {
		ret = TRANSACTION_GENERIC_ERROR;
		goto done;
	}

	ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
				      &head_referent, &head_type);
	if (ret < 0)
		goto done;
	ret = 0;

	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *u = transaction->updates[i];
		struct object_id current_oid = {0};
		struct reftable_stack *stack;
		const char *rewritten_ref;

		stack = stack_for(refs, u->refname, &rewritten_ref);

		/* Verify that the new object ID is valid. */
		if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
		    !(u->flags & REF_SKIP_OID_VERIFICATION) &&
		    !(u->flags & REF_LOG_ONLY)) {
			struct object *o = parse_object(refs->base.repo, &u->new_oid);
			if (!o) {
				strbuf_addf(err,
					    _("trying to write ref '%s' with nonexistent object %s"),
					    u->refname, oid_to_hex(&u->new_oid));
				ret = -1;
				goto done;
			}

			if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
				strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
					    oid_to_hex(&u->new_oid), u->refname);
				ret = -1;
				goto done;
			}
		}

		/*
		 * When we update the reference that HEAD points to we enqueue
		 * a second log-only update for HEAD so that its reflog is
		 * updated accordingly.
		 */
		if (head_type == REF_ISSYMREF &&
		    !(u->flags & REF_LOG_ONLY) &&
		    !(u->flags & REF_UPDATE_VIA_HEAD) &&
		    !strcmp(rewritten_ref, head_referent.buf)) {
			struct ref_update *new_update;

			/*
			 * First make sure that HEAD is not already in the
			 * transaction. This check is O(lg N) in the transaction
			 * size, but it happens at most once per transaction.
			 */
			if (string_list_has_string(&affected_refnames, "HEAD")) {
				/* An entry already existed */
				strbuf_addf(err,
					    _("multiple updates for 'HEAD' (including one "
					      "via its referent '%s') are not allowed"),
					    u->refname);
				ret = TRANSACTION_NAME_CONFLICT;
				goto done;
			}

			new_update = ref_transaction_add_update(
					transaction, "HEAD",
					u->flags | REF_LOG_ONLY | REF_NO_DEREF,
					&u->new_oid, &u->old_oid, u->msg);
			string_list_insert(&affected_refnames, new_update->refname);
		}

		ret = read_ref_without_reload(stack, rewritten_ref,
					      &current_oid, &referent, &u->type);
		if (ret < 0)
			goto done;
		if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
			/*
			 * The reference does not exist, and we either have no
			 * old object ID or expect the reference to not exist.
			 * We can thus skip below safety checks as well as the
			 * symref splitting. But we do want to verify that
			 * there is no conflicting reference here so that we
			 * can output a proper error message instead of failing
			 * at a later point.
			 */
			ret = refs_verify_refname_available(ref_store, u->refname,
							    &affected_refnames, NULL, err);
			if (ret < 0)
				goto done;

			/*
			 * There is no need to write the reference deletion
			 * when the reference in question doesn't exist.
			 */
			if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
				ret = queue_transaction_update(refs, tx_data, u,
							       &current_oid, err);
				if (ret)
					goto done;
			}

			continue;
		}
		if (ret > 0) {
			/* The reference does not exist, but we expected it to. */
			strbuf_addf(err, _("cannot lock ref '%s': "
					   "unable to resolve reference '%s'"),
				    original_update_refname(u), u->refname);
			ret = -1;
			goto done;
		}

		if (u->type & REF_ISSYMREF) {
			/*
			 * The reftable stack is locked at this point already,
			 * so it is safe to call `refs_resolve_ref_unsafe()`
			 * here without causing races.
			 */
			const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
								       &current_oid, NULL);

			if (u->flags & REF_NO_DEREF) {
				if (u->flags & REF_HAVE_OLD && !resolved) {
					strbuf_addf(err, _("cannot lock ref '%s': "
							   "error reading reference"), u->refname);
					ret = -1;
					goto done;
				}
			} else {
				struct ref_update *new_update;
				int new_flags;

				new_flags = u->flags;
				if (!strcmp(rewritten_ref, "HEAD"))
					new_flags |= REF_UPDATE_VIA_HEAD;

				/*
				 * If we are updating a symref (eg. HEAD), we should also
				 * update the branch that the symref points to.
				 *
				 * This is generic functionality, and would be better
				 * done in refs.c, but the current implementation is
				 * intertwined with the locking in files-backend.c.
				 */
				new_update = ref_transaction_add_update(
						transaction, referent.buf, new_flags,
						&u->new_oid, &u->old_oid, u->msg);
				new_update->parent_update = u;

				/*
				 * Change the symbolic ref update to log only. Also, it
				 * doesn't need to check its old OID value, as that will be
				 * done when new_update is processed.
				 */
				u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
				u->flags &= ~REF_HAVE_OLD;

				if (string_list_has_string(&affected_refnames, new_update->refname)) {
					strbuf_addf(err,
						    _("multiple updates for '%s' (including one "
						      "via symref '%s') are not allowed"),
						    referent.buf, u->refname);
					ret = TRANSACTION_NAME_CONFLICT;
					goto done;
				}
				string_list_insert(&affected_refnames, new_update->refname);
			}
		}

		/*
		 * Verify that the old object matches our expectations. Note
		 * that the error messages here do not make a lot of sense in
		 * the context of the reftable backend as we never lock
		 * individual refs. But the error messages match what the files
		 * backend returns, which keeps our tests happy.
		 */
		if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
			if (is_null_oid(&u->old_oid))
				strbuf_addf(err, _("cannot lock ref '%s': "
						   "reference already exists"),
					    original_update_refname(u));
			else if (is_null_oid(&current_oid))
				strbuf_addf(err, _("cannot lock ref '%s': "
						   "reference is missing but expected %s"),
					    original_update_refname(u),
					    oid_to_hex(&u->old_oid));
			else
				strbuf_addf(err, _("cannot lock ref '%s': "
						   "is at %s but expected %s"),
					    original_update_refname(u),
					    oid_to_hex(&current_oid),
					    oid_to_hex(&u->old_oid));
			ret = -1;
			goto done;
		}

		/*
		 * If all of the following conditions are true:
		 *
		 *   - We're not about to write a symref.
		 *   - We're not about to write a log-only entry.
		 *   - Old and new object ID are the same.
		 *
		 * Then we're essentially doing a no-op update that can be
		 * skipped. This is not only for the sake of efficiency, but
		 * also skips writing unneeded reflog entries.
		 */
		if ((u->type & REF_ISSYMREF) ||
		    (u->flags & REF_LOG_ONLY) ||
		    (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
			ret = queue_transaction_update(refs, tx_data, u,
						       &current_oid, err);
			if (ret)
				goto done;
		}
	}

	transaction->backend_data = tx_data;
	transaction->state = REF_TRANSACTION_PREPARED;

done:
	assert(ret != REFTABLE_API_ERROR);
	if (ret < 0) {
		free_transaction_data(tx_data);
		transaction->state = REF_TRANSACTION_CLOSED;
		if (!err->len)
			strbuf_addf(err, _("reftable: transaction prepare: %s"),
				    reftable_error_str(ret));
	}
	string_list_clear(&affected_refnames, 0);
	strbuf_release(&referent);
	strbuf_release(&head_referent);

	return ret;
}

static int reftable_be_transaction_abort(struct ref_store *ref_store,
					 struct ref_transaction *transaction,
					 struct strbuf *err)
{
	struct reftable_transaction_data *tx_data = transaction->backend_data;
	free_transaction_data(tx_data);
	transaction->state = REF_TRANSACTION_CLOSED;
	return 0;
}

static int transaction_update_cmp(const void *a, const void *b)
{
	return strcmp(((struct reftable_transaction_update *)a)->update->refname,
		      ((struct reftable_transaction_update *)b)->update->refname);
}

static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_transaction_table_arg *arg = cb_data;
	struct reftable_merged_table *mt =
		reftable_stack_merged_table(arg->stack);
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record *logs = NULL;
	size_t logs_nr = 0, logs_alloc = 0, i;
	int ret = 0;

	QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);

	reftable_writer_set_limits(writer, ts, ts);

	for (i = 0; i < arg->updates_nr; i++) {
		struct reftable_transaction_update *tx_update = &arg->updates[i];
		struct ref_update *u = tx_update->update;

		/*
		 * Write a reflog entry when updating a ref to point to
		 * something new in either of the following cases:
		 *
		 * - The reference is about to be deleted. We always want to
		 *   delete the reflog in that case.
		 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
		 *   the reflog entry.
		 * - `core.logAllRefUpdates` tells us to create the reflog for
		 *   the given ref.
		 */
		if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
			struct reftable_log_record log = {0};
			struct reftable_iterator it = {0};

			/*
			 * When deleting refs we also delete all reflog entries
			 * with them. While it is not strictly required to
			 * delete reflogs together with their refs, this
			 * matches the behaviour of the files backend.
			 *
			 * Unfortunately, we have no better way than to delete
			 * all reflog entries one by one.
			 */
			ret = reftable_merged_table_seek_log(mt, &it, u->refname);
			while (ret == 0) {
				struct reftable_log_record *tombstone;

				ret = reftable_iterator_next_log(&it, &log);
				if (ret < 0)
					break;
				if (ret > 0 || strcmp(log.refname, u->refname)) {
					ret = 0;
					break;
				}

				ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
				tombstone = &logs[logs_nr++];
				tombstone->refname = xstrdup(u->refname);
				tombstone->value_type = REFTABLE_LOG_DELETION;
				tombstone->update_index = log.update_index;
			}

			reftable_log_record_release(&log);
			reftable_iterator_destroy(&it);

			if (ret)
				goto done;
		} else if (u->flags & REF_HAVE_NEW &&
			   (u->flags & REF_FORCE_CREATE_REFLOG ||
			    should_write_log(&arg->refs->base, u->refname))) {
			struct reftable_log_record *log;

			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			log = &logs[logs_nr++];
			memset(log, 0, sizeof(*log));

			fill_reftable_log_record(log);
			log->update_index = ts;
			log->refname = xstrdup(u->refname);
			memcpy(log->value.update.new_hash, u->new_oid.hash, GIT_MAX_RAWSZ);
			memcpy(log->value.update.old_hash, tx_update->current_oid.hash, GIT_MAX_RAWSZ);
			log->value.update.message =
				xstrndup(u->msg, arg->refs->write_options.block_size / 2);
		}

		if (u->flags & REF_LOG_ONLY)
			continue;

		if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
			struct reftable_ref_record ref = {
				.refname = (char *)u->refname,
				.update_index = ts,
				.value_type = REFTABLE_REF_DELETION,
			};

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		} else if (u->flags & REF_HAVE_NEW) {
			struct reftable_ref_record ref = {0};
			struct object_id peeled;
			int peel_error;

			ref.refname = (char *)u->refname;
			ref.update_index = ts;

			peel_error = peel_object(&u->new_oid, &peeled);
			if (!peel_error) {
				ref.value_type = REFTABLE_REF_VAL2;
				memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
				memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
			} else if (!is_null_oid(&u->new_oid)) {
				ref.value_type = REFTABLE_REF_VAL1;
				memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
			}

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		}
	}

	/*
	 * Logs are written at the end so that we do not have intermixed ref
	 * and log blocks.
	 */
	if (logs) {
		ret = reftable_writer_add_logs(writer, logs, logs_nr);
		if (ret < 0)
			goto done;
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}

static int reftable_be_transaction_finish(struct ref_store *ref_store,
					  struct ref_transaction *transaction,
					  struct strbuf *err)
{
	struct reftable_transaction_data *tx_data = transaction->backend_data;
	int ret = 0;

	for (size_t i = 0; i < tx_data->args_nr; i++) {
		ret = reftable_addition_add(tx_data->args[i].addition,
					    write_transaction_table, &tx_data->args[i]);
		if (ret < 0)
			goto done;

		ret = reftable_addition_commit(tx_data->args[i].addition);
		if (ret < 0)
			goto done;
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	free_transaction_data(tx_data);
	transaction->state = REF_TRANSACTION_CLOSED;

	if (ret) {
		strbuf_addf(err, _("reftable: transaction failure: %s"),
			    reftable_error_str(ret));
		return -1;
	}
	return ret;
}

static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
						  struct ref_transaction *transaction,
						  struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int reftable_be_pack_refs(struct ref_store *ref_store,
				 struct pack_refs_opts *opts)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
	struct reftable_stack *stack;
	int ret;

	if (refs->err)
		return refs->err;

	stack = refs->worktree_stack;
	if (!stack)
		stack = refs->main_stack;

	ret = reftable_stack_compact_all(stack, NULL);
	if (ret)
		goto out;
	ret = reftable_stack_clean(stack);
	if (ret)
		goto out;

out:
	return ret;
}
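
/*
 * Editor's note: for reftable there are no loose refs to "pack", so this
 * callback instead compacts the current stack into a single table and then
 * removes stale table files via reftable_stack_clean().
 */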

struct write_create_symref_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	const char *refname;
	const char *target;
	const char *logmsg;
};

static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_create_symref_arg *create = cb_data;
	uint64_t ts = reftable_stack_next_update_index(create->stack);
	struct reftable_ref_record ref = {
		.refname = (char *)create->refname,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *)create->target,
		.update_index = ts,
	};
	struct reftable_log_record log = {0};
	struct object_id new_oid;
	struct object_id old_oid;
	int ret;

	reftable_writer_set_limits(writer, ts, ts);

	ret = reftable_writer_add_ref(writer, &ref);
	if (ret)
		return ret;

	/*
	 * Note that it is important to try and resolve the reference before we
	 * write the log entry. This is because `should_write_log()` will munge
	 * `core.logAllRefUpdates`, which is undesirable when we create a new
	 * repository because it would be written into the config. As HEAD will
	 * not resolve for new repositories this ordering will ensure that this
	 * never happens.
	 */
	if (!create->logmsg ||
	    !refs_resolve_ref_unsafe(&create->refs->base, create->target,
				     RESOLVE_REF_READING, &new_oid, NULL) ||
	    !should_write_log(&create->refs->base, create->refname))
		return 0;

	fill_reftable_log_record(&log);
	log.refname = xstrdup(create->refname);
	log.update_index = ts;
	log.value.update.message = xstrndup(create->logmsg,
					    create->refs->write_options.block_size / 2);
	memcpy(log.value.update.new_hash, new_oid.hash, GIT_MAX_RAWSZ);
	if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
				    RESOLVE_REF_READING, &old_oid, NULL))
		memcpy(log.value.update.old_hash, old_oid.hash, GIT_MAX_RAWSZ);

	ret = reftable_writer_add_log(writer, &log);
	reftable_log_record_release(&log);
	return ret;
}

static int reftable_be_create_symref(struct ref_store *ref_store,
				     const char *refname,
				     const char *target,
				     const char *logmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_create_symref_arg arg = {
		.refs = refs,
		.stack = stack,
		.refname = refname,
		.target = target,
		.logmsg = logmsg,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	ret = reftable_stack_add(stack, &write_create_symref_table, &arg);

done:
	assert(ret != REFTABLE_API_ERROR);
	if (ret)
		error("unable to write symref for %s: %s", refname,
		      reftable_error_str(ret));
	return ret;
}

struct write_copy_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	const char *oldname;
	const char *newname;
	const char *logmsg;
	int delete_old;
};

static int write_copy_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_copy_arg *arg = cb_data;
	uint64_t deletion_ts, creation_ts;
	struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
	struct reftable_ref_record old_ref = {0}, refs[2] = {0};
	struct reftable_log_record old_log = {0}, *logs = NULL;
	struct reftable_iterator it = {0};
	struct string_list skip = STRING_LIST_INIT_NODUP;
	struct strbuf errbuf = STRBUF_INIT;
	size_t logs_nr = 0, logs_alloc = 0, i;
	int ret;

	if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
		ret = error(_("refname %s not found"), arg->oldname);
		goto done;
	}
	if (old_ref.value_type == REFTABLE_REF_SYMREF) {
		ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
			    arg->oldname);
		goto done;
	}

	/*
	 * There's nothing to do in case the old and new name are the same, so
	 * we exit early in that case.
	 */
	if (!strcmp(arg->oldname, arg->newname)) {
		ret = 0;
		goto done;
	}

	/*
	 * Verify that the new refname is available.
	 */
	if (arg->delete_old)
		string_list_insert(&skip, arg->oldname);
	ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
					    NULL, &skip, &errbuf);
	if (ret < 0) {
		error("%s", errbuf.buf);
		goto done;
	}

	/*
	 * When deleting the old reference we have to use two update indices:
	 * once to delete the old ref and its reflog, and once to create the
	 * new ref and its reflog. They need to be staged with two separate
	 * indices because the new reflog needs to encode both the deletion of
	 * the old branch and the creation of the new branch, and we cannot do
	 * two changes to a reflog in a single update.
	 */
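	/*
	 * For example (editor's note): if the stack's next update index is 5,
	 * a rename stages the deletion of the old ref at index 5 and the
	 * creation of the new ref at index 6, while a plain copy stages the
	 * creation at index 5 only.
	 */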
	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
	if (arg->delete_old)
		creation_ts++;
	reftable_writer_set_limits(writer, deletion_ts, creation_ts);

	/*
	 * Add the new reference. If this is a rename then we also delete the
	 * old reference.
	 */
	refs[0] = old_ref;
	refs[0].refname = (char *)arg->newname;
	refs[0].update_index = creation_ts;
	if (arg->delete_old) {
		refs[1].refname = (char *)arg->oldname;
		refs[1].value_type = REFTABLE_REF_DELETION;
		refs[1].update_index = deletion_ts;
	}
	ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
	if (ret < 0)
		goto done;

	/*
	 * When deleting the old branch we need to create a reflog entry on the
	 * new branch name that indicates that the old branch has been deleted
	 * and then recreated. This is a tad weird, but matches what the files
	 * backend does.
	 */
	if (arg->delete_old) {
		struct strbuf head_referent = STRBUF_INIT;
		struct object_id head_oid;
		int append_head_reflog;
		unsigned head_type = 0;

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
		fill_reftable_log_record(&logs[logs_nr]);
		logs[logs_nr].refname = (char *)arg->newname;
		logs[logs_nr].update_index = deletion_ts;
		logs[logs_nr].value.update.message =
			xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
		memcpy(logs[logs_nr].value.update.old_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
		logs_nr++;

		ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
		if (ret < 0)
			goto done;
		append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
		strbuf_release(&head_referent);

		/*
		 * The files backend uses `refs_delete_ref()` to delete the old
		 * branch name, which will append a reflog entry for HEAD in
		 * case it points to the old branch.
		 */
		if (append_head_reflog) {
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			logs[logs_nr] = logs[logs_nr - 1];
			logs[logs_nr].refname = "HEAD";
			logs_nr++;
		}
	}

	/*
	 * Create the reflog entry for the newly created branch.
	 */
	ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
	memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
	fill_reftable_log_record(&logs[logs_nr]);
	logs[logs_nr].refname = (char *)arg->newname;
	logs[logs_nr].update_index = creation_ts;
	logs[logs_nr].value.update.message =
		xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
	memcpy(logs[logs_nr].value.update.new_hash, old_ref.value.val1, GIT_MAX_RAWSZ);
	logs_nr++;

	/*
	 * In addition to writing the reflog entry for the new branch, we also
	 * copy over all log entries from the old reflog. Last but not least,
	 * when renaming we also have to delete all the old reflog entries.
	 */
	ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
	if (ret < 0)
		goto done;

	while (1) {
		ret = reftable_iterator_next_log(&it, &old_log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
			ret = 0;
			break;
		}

		free(old_log.refname);

		/*
		 * Copy over the old reflog entry with the new refname.
		 */
		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr] = old_log;
		logs[logs_nr].refname = (char *)arg->newname;
		logs_nr++;

		/*
		 * Delete the old reflog entry in case we are renaming.
		 */
		if (arg->delete_old) {
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
			logs[logs_nr].refname = (char *)arg->oldname;
			logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
			logs[logs_nr].update_index = old_log.update_index;
			logs_nr++;
		}

		/*
		 * Transfer ownership of the log record we're iterating over to
		 * the array of log records. Otherwise, the pointers would get
		 * free'd or reallocated by the iterator.
		 */
		memset(&old_log, 0, sizeof(old_log));
	}

	ret = reftable_writer_add_logs(writer, logs, logs_nr);
	if (ret < 0)
		goto done;

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_iterator_destroy(&it);
	string_list_clear(&skip, 0);
	strbuf_release(&errbuf);
	for (i = 0; i < logs_nr; i++) {
		if (!strcmp(logs[i].refname, "HEAD"))
			continue;
		logs[i].refname = NULL;
		reftable_log_record_release(&logs[i]);
	}
	free(logs);
	reftable_ref_record_release(&old_ref);
	reftable_log_record_release(&old_log);
	return ret;
}

static int reftable_be_rename_ref(struct ref_store *ref_store,
				  const char *oldrefname,
				  const char *newrefname,
				  const char *logmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
	struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
	struct write_copy_arg arg = {
		.refs = refs,
		.stack = stack,
		.oldname = oldrefname,
		.newname = newrefname,
		.logmsg = logmsg,
		.delete_old = 1,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;
	ret = reftable_stack_add(stack, &write_copy_table, &arg);

done:
	assert(ret != REFTABLE_API_ERROR);
	return ret;
}

static int reftable_be_copy_ref(struct ref_store *ref_store,
				const char *oldrefname,
				const char *newrefname,
				const char *logmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
	struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
	struct write_copy_arg arg = {
		.refs = refs,
		.stack = stack,
		.oldname = oldrefname,
		.newname = newrefname,
		.logmsg = logmsg,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;
	ret = reftable_stack_add(stack, &write_copy_table, &arg);

done:
	assert(ret != REFTABLE_API_ERROR);
	return ret;
}

struct reftable_reflog_iterator {
	struct ref_iterator base;
	struct reftable_ref_store *refs;
	struct reftable_iterator iter;
	struct reftable_log_record log;
	struct strbuf last_name;
	int err;
};

static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_reflog_iterator *iter =
		(struct reftable_reflog_iterator *)ref_iterator;

	while (!iter->err) {
		iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
		if (iter->err)
			break;

		/*
		 * We want the refnames that we have reflogs for, so we skip if
		 * we've already produced this name. This could be faster by
		 * seeking directly to reflog@update_index==0.
		 */
		if (!strcmp(iter->log.refname, iter->last_name.buf))
			continue;

		if (check_refname_format(iter->log.refname,
					 REFNAME_ALLOW_ONELEVEL))
			continue;

		strbuf_reset(&iter->last_name);
		strbuf_addstr(&iter->last_name, iter->log.refname);
		iter->base.refname = iter->log.refname;

		break;
	}

	if (iter->err > 0) {
		if (ref_iterator_abort(ref_iterator) != ITER_DONE)
			return ITER_ERROR;
		return ITER_DONE;
	}

	if (iter->err < 0) {
		ref_iterator_abort(ref_iterator);
		return ITER_ERROR;
	}

	return ITER_OK;
}

static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
					 struct object_id *peeled)
{
	BUG("reftable reflog iterator cannot be peeled");
	return -1;
}

static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct reftable_reflog_iterator *iter =
		(struct reftable_reflog_iterator *)ref_iterator;
	reftable_log_record_release(&iter->log);
	reftable_iterator_destroy(&iter->iter);
	strbuf_release(&iter->last_name);
	free(iter);
	return ITER_DONE;
}

static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
	.advance = reftable_reflog_iterator_advance,
	.peel = reftable_reflog_iterator_peel,
	.abort = reftable_reflog_iterator_abort
};

static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
								  struct reftable_stack *stack)
{
	struct reftable_merged_table *merged_table;
	struct reftable_reflog_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
	strbuf_init(&iter->last_name, 0);
	iter->refs = refs;

	ret = refs->err;
	if (ret)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	merged_table = reftable_stack_merged_table(stack);

	ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
	if (ret < 0)
		goto done;

done:
	iter->err = ret;
	return iter;
}

static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
	struct reftable_reflog_iterator *main_iter, *worktree_iter;

	main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
	if (!refs->worktree_stack)
		return &main_iter->base;

	worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);

	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}

static int yield_log_record(struct reftable_log_record *log,
			    each_reflog_ent_fn fn,
			    void *cb_data)
{
	struct object_id old_oid, new_oid;
	const char *full_committer;

	oidread(&old_oid, log->value.update.old_hash);
	oidread(&new_oid, log->value.update.new_hash);

	/*
	 * When both the old object ID and the new object ID are null
	 * then this is the reflog existence marker. The caller must
	 * not be aware of it.
	 */
	if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
		return 0;

	full_committer = fmt_ident(log->value.update.name, log->value.update.email,
				   WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
	return fn(&old_oid, &new_oid, full_committer,
		  log->value.update.time, log->value.update.tz_offset,
		  log->value.update.message, cb_data);
}

static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
						   const char *refname,
						   each_reflog_ent_fn fn,
						   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = NULL;
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	int ret;

	if (refs->err < 0)
		return refs->err;

	mt = reftable_stack_merged_table(stack);
	ret = reftable_merged_table_seek_log(mt, &it, refname);
	while (!ret) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, refname)) {
			ret = 0;
			break;
		}

		ret = yield_log_record(&log, fn, cb_data);
		if (ret)
			break;
	}

	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}

static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
					   const char *refname,
					   each_reflog_ent_fn fn,
					   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = NULL;
	struct reftable_log_record *logs = NULL;
	struct reftable_iterator it = {0};
	size_t logs_alloc = 0, logs_nr = 0, i;
	int ret;

	if (refs->err < 0)
		return refs->err;

	mt = reftable_stack_merged_table(stack);
	ret = reftable_merged_table_seek_log(mt, &it, refname);
	while (!ret) {
		struct reftable_log_record log = {0};

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			ret = 0;
			break;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	for (i = logs_nr; i--;) {
		ret = yield_log_record(&logs[i], fn, cb_data);
		if (ret)
			goto done;
	}

done:
	reftable_iterator_destroy(&it);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}
1811
static int reftable_be_reflog_exists(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = NULL;
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	/*
	 * Fetch the merged table only after the reload: reloading may swap
	 * out the stack's merged table, which would leave us with a dangling
	 * pointer.
	 */
	mt = reftable_stack_merged_table(stack);

	ret = reftable_merged_table_seek_log(mt, &it, refname);
	if (ret < 0)
		goto done;

	/*
	 * Check whether we get at least one log record for the given ref
	 * name. If so, the reflog exists, otherwise it doesn't.
	 */
	ret = reftable_iterator_next_log(&it, &log);
	if (ret < 0)
		goto done;
	if (ret > 0) {
		ret = 0;
		goto done;
	}

	ret = strcmp(log.refname, refname) == 0;

done:
	reftable_iterator_destroy(&it);
	reftable_log_record_release(&log);
	if (ret < 0)
		ret = 0;
	return ret;
}

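/*
 * Argument bundle for write_reflog_existence_table(). The stack is
 * needed to derive the update index at which the marker record gets
 * written.
 */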
struct write_reflog_existence_arg {
	struct reftable_ref_store *refs;
	const char *refname;
	struct reftable_stack *stack;
};

static int write_reflog_existence_table(struct reftable_writer *writer,
					void *cb_data)
{
	struct write_reflog_existence_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record log = {0};
	int ret;

	ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
	if (ret <= 0)
		goto done;

	reftable_writer_set_limits(writer, ts, ts);

	/*
	 * The existence entry has both the old and the new object ID set to
	 * the null object ID. Our iterators are aware of this and will not
	 * present it to their callers.
	 */
	log.refname = xstrdup(arg->refname);
	log.update_index = ts;
	log.value_type = REFTABLE_LOG_UPDATE;
	ret = reftable_writer_add_log(writer, &log);

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_log_record_release(&log);
	return ret;
}

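/*
 * Creating a reflog thus boils down to adding a table that contains
 * nothing but the existence marker. When log records for the ref exist
 * already, this degenerates to a no-op.
 */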
static int reftable_be_create_reflog(struct ref_store *ref_store,
				     const char *refname,
				     struct strbuf *errmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_existence_arg arg = {
		.refs = refs,
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);

done:
	return ret;
}

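/*
 * A tombstone is a log record of type REFTABLE_LOG_DELETION that
 * shadows the record with the same refname and update index in tables
 * further down the stack. The shadowed records only vanish physically
 * once the stack gets compacted.
 */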
struct write_reflog_delete_arg {
	struct reftable_stack *stack;
	const char *refname;
};

static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_reflog_delete_arg *arg = cb_data;
	struct reftable_merged_table *mt =
		reftable_stack_merged_table(arg->stack);
	struct reftable_log_record log = {0}, tombstone = {0};
	struct reftable_iterator it = {0};
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	int ret;

	reftable_writer_set_limits(writer, ts, ts);

	/*
	 * In order to delete a reflog we need to write a tombstone for each
	 * of its entries, one by one. This is inefficient, but the reftable
	 * format does not have a better deletion marker right now.
	 */
	ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
	while (ret == 0) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, arg->refname)) {
			ret = 0;
			break;
		}

		tombstone.refname = (char *)arg->refname;
		tombstone.value_type = REFTABLE_LOG_DELETION;
		tombstone.update_index = log.update_index;

		ret = reftable_writer_add_log(writer, &tombstone);
	}

	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}

static int reftable_be_delete_reflog(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_delete_arg arg = {
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;
	ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);

	assert(ret != REFTABLE_API_ERROR);
	return ret;
}

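/*
 * State handed to write_reflog_expiry_table(): "records" holds the
 * rewritten log records, where pruned entries are marked as deletions,
 * and "update_oid", when non-null, is the value the ref itself gets
 * rewritten to under EXPIRE_REFLOGS_UPDATE_REF.
 */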
struct reflog_expiry_arg {
	struct reftable_stack *stack;
	struct reftable_log_record *records;
	struct object_id update_oid;
	const char *refname;
	size_t len;
};

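/*
 * Writer callback for the expiry: it re-adds the (possibly rewritten)
 * ref value, an existence placeholder in case no live entries remain,
 * and all rewritten log records, all at a single new update index.
 */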
static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
{
	struct reflog_expiry_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	uint64_t live_records = 0;
	size_t i;
	int ret;

	for (i = 0; i < arg->len; i++)
		if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
			live_records++;

	reftable_writer_set_limits(writer, ts, ts);

	if (!is_null_oid(&arg->update_oid)) {
		struct reftable_ref_record ref = {0};
		struct object_id peeled;

		ref.refname = (char *)arg->refname;
		ref.update_index = ts;

		if (!peel_object(&arg->update_oid, &peeled)) {
			ref.value_type = REFTABLE_REF_VAL2;
			memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
			memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
		} else {
			ref.value_type = REFTABLE_REF_VAL1;
			memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
		}

		ret = reftable_writer_add_ref(writer, &ref);
		if (ret < 0)
			return ret;
	}

	/*
	 * When no live entries remain in the reflog we do not just leave it
	 * empty, but write a placeholder entry that marks the reflog as
	 * still existing.
	 */
	if (!live_records) {
		struct reftable_log_record log = {
			.refname = (char *)arg->refname,
			.value_type = REFTABLE_LOG_UPDATE,
			.update_index = ts,
		};

		ret = reftable_writer_add_log(writer, &log);
		if (ret)
			return ret;
	}

	for (i = 0; i < arg->len; i++) {
		ret = reftable_writer_add_log(writer, &arg->records[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reftable_be_reflog_expire(struct ref_store *ref_store,
				     const char *refname,
				     unsigned int flags,
				     reflog_expiry_prepare_fn prepare_fn,
				     reflog_expiry_should_prune_fn should_prune_fn,
				     reflog_expiry_cleanup_fn cleanup_fn,
				     void *policy_cb_data)
{
	/*
	 * For log expiry, we write tombstones for every single reflog entry
	 * that is to be expired. This means that the entries are still
	 * retrievable by delving into the stack, and expiring entries
	 * paradoxically consumes extra space until the reftable stack gets
	 * compacted.
	 *
	 * It would be better if the refs backend supported an API that sets a
	 * criterion for all refs, passing the criterion to pack_refs().
	 *
	 * On the plus side, because we do the expiration per ref, we can
	 * easily insert the reflog existence markers.
	 */
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = NULL;
	struct reftable_log_record *logs = NULL;
	struct reftable_log_record *rewritten = NULL;
	struct reftable_ref_record ref_record = {0};
	struct reftable_iterator it = {0};
	struct reftable_addition *add = NULL;
	struct reflog_expiry_arg arg = {0};
	struct object_id oid = {0};
	uint8_t *last_hash = NULL;
	size_t logs_nr = 0, logs_alloc = 0, i;
	int ret;

	if (refs->err < 0)
		return refs->err;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	/*
	 * Fetch the merged table only after the reload, which may have
	 * replaced the stack's merged table and thus invalidated any pointer
	 * obtained earlier.
	 */
	mt = reftable_stack_merged_table(stack);

	ret = reftable_merged_table_seek_log(mt, &it, refname);
	if (ret < 0)
		goto done;

	ret = reftable_stack_new_addition(&add, stack);
	if (ret < 0)
		goto done;

	ret = reftable_stack_read_ref(stack, refname, &ref_record);
	if (ret < 0)
		goto done;
	if (reftable_ref_record_val1(&ref_record))
		oidread(&oid, reftable_ref_record_val1(&ref_record));
	prepare_fn(refname, &oid, policy_cb_data);

	while (1) {
		struct reftable_log_record log = {0};
		struct object_id old_oid, new_oid;

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			break;
		}

		oidread(&old_oid, log.value.update.old_hash);
		oidread(&new_oid, log.value.update.new_hash);

		/*
		 * Skip over the reflog existence marker. We will add it back
		 * in when there are no live reflog records.
		 */
		if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
			reftable_log_record_release(&log);
			continue;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	/*
	 * We need to rewrite all reflog entries according to the pruning
	 * callback function:
	 *
	 *   - If a reflog entry shall be pruned we mark the record for
	 *     deletion.
	 *
	 *   - Otherwise we may have to rewrite the chain of reflog entries so
	 *     that gaps created by just-deleted records get backfilled.
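	 *
	 * For example, given the entries (old -> new) A->B, B->C and C->D,
	 * pruning B->C with EXPIRE_REFLOGS_REWRITE set rewrites the
	 * surviving entry C->D to B->D so that the chain stays connected.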
	 */
	CALLOC_ARRAY(rewritten, logs_nr);
	for (i = logs_nr; i--;) {
		struct reftable_log_record *dest = &rewritten[i];
		struct object_id old_oid, new_oid;

		*dest = logs[i];
		oidread(&old_oid, logs[i].value.update.old_hash);
		oidread(&new_oid, logs[i].value.update.new_hash);

		if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
				    (timestamp_t)logs[i].value.update.time,
				    logs[i].value.update.tz_offset,
				    logs[i].value.update.message,
				    policy_cb_data)) {
			dest->value_type = REFTABLE_LOG_DELETION;
		} else {
			if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
				memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
			last_hash = logs[i].value.update.new_hash;
		}
	}

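	/*
	 * With EXPIRE_REFLOGS_UPDATE_REF the ref itself is rewritten to the
	 * new object ID of the newest surviving entry.
	 */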
	if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
	    reftable_ref_record_val1(&ref_record))
		oidread(&arg.update_oid, last_hash);

	arg.records = rewritten;
	arg.len = logs_nr;
	arg.stack = stack;
	arg.refname = refname;

	ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
	if (ret < 0)
		goto done;

	/*
	 * Future improvement: we could skip writing records that were
	 * not changed.
	 */
	if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
		ret = reftable_addition_commit(add);

done:
	if (add)
		cleanup_fn(policy_cb_data);
	assert(ret != REFTABLE_API_ERROR);

	reftable_ref_record_release(&ref_record);
	reftable_iterator_destroy(&it);
	reftable_addition_destroy(add);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	free(rewritten);
	return ret;
}
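/*
 * Virtual table that wires the reftable backend into the generic ref
 * storage interface.
 */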
struct ref_storage_be refs_be_reftable = {
	.name = "reftable",
	.init = reftable_be_init,
	.init_db = reftable_be_init_db,
	.transaction_prepare = reftable_be_transaction_prepare,
	.transaction_finish = reftable_be_transaction_finish,
	.transaction_abort = reftable_be_transaction_abort,
	.initial_transaction_commit = reftable_be_initial_transaction_commit,

	.pack_refs = reftable_be_pack_refs,
	.create_symref = reftable_be_create_symref,
	.rename_ref = reftable_be_rename_ref,
	.copy_ref = reftable_be_copy_ref,

	.iterator_begin = reftable_be_iterator_begin,
	.read_raw_ref = reftable_be_read_raw_ref,
	.read_symbolic_ref = reftable_be_read_symbolic_ref,

	.reflog_iterator_begin = reftable_be_reflog_iterator_begin,
	.for_each_reflog_ent = reftable_be_for_each_reflog_ent,
	.for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
	.reflog_exists = reftable_be_reflog_exists,
	.create_reflog = reftable_be_create_reflog,
	.delete_reflog = reftable_be_delete_reflog,
	.reflog_expire = reftable_be_reflog_expire,
};