refs/reftable-backend.c
1#include "../git-compat-util.h"
2#include "../abspath.h"
3#include "../chdir-notify.h"
4#include "../environment.h"
5#include "../gettext.h"
6#include "../hash.h"
7#include "../hex.h"
8#include "../iterator.h"
9#include "../ident.h"
10#include "../lockfile.h"
11#include "../object.h"
12#include "../path.h"
13#include "../refs.h"
14#include "../reftable/reftable-stack.h"
15#include "../reftable/reftable-record.h"
16#include "../reftable/reftable-error.h"
17#include "../reftable/reftable-iterator.h"
18#include "../reftable/reftable-merged.h"
19#include "../setup.h"
20#include "../strmap.h"
21#include "refs-internal.h"
22
23/*
24 * Used as a flag in ref_update::flags when the ref_update was via an
25 * update to HEAD.
26 */
27#define REF_UPDATE_VIA_HEAD (1 << 8)
28
29struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51};
52
53/*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62{
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76}
77
78/*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
90 */
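/*
 * For example, a lookup of "worktrees/foo/refs/heads/bar" is served by
 * the stack of worktree "foo" with the refname rewritten to
 * "refs/heads/bar", whereas a plain "refs/heads/bar" is served by the
 * main stack. (The names "foo" and "bar" are illustrative only.)
 */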
91static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94{
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when the worktree references the
118 * current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153}
154
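/*
 * Decide whether a reflog entry should be written for the given ref,
 * based on core.logAllRefUpdates. With LOG_REFS_NORMAL, refs that are
 * not auto-created only get an entry if a reflog already exists.
 */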
155static int should_write_log(struct ref_store *refs, const char *refname)
156{
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172}
173
174static void clear_reftable_log_record(struct reftable_log_record *log)
175{
176 switch (log->value_type) {
177 case REFTABLE_LOG_UPDATE:
178 /*
179 * When we write log records, the hashes are owned by the
180 * caller and thus shouldn't be free'd.
181 */
182 log->value.update.old_hash = NULL;
183 log->value.update.new_hash = NULL;
184 break;
185 case REFTABLE_LOG_DELETION:
186 break;
187 }
188 reftable_log_record_release(log);
189}
190
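/*
 * Fill in the committer information of a log record by parsing
 * git_committer_info(): name, email, timestamp and timezone offset.
 * Any data previously held by the record is released first.
 */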
191static void fill_reftable_log_record(struct reftable_log_record *log)
192{
193 const char *info = git_committer_info(0);
194 struct ident_split split = {0};
195 int sign = 1;
196
197 if (split_ident_line(&split, info, strlen(info)))
198 BUG("failed splitting committer info");
199
200 reftable_log_record_release(log);
201 log->value_type = REFTABLE_LOG_UPDATE;
202 log->value.update.name =
203 xstrndup(split.name_begin, split.name_end - split.name_begin);
204 log->value.update.email =
205 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
206 log->value.update.time = atol(split.date_begin);
207 if (*split.tz_begin == '-') {
208 sign = -1;
209 split.tz_begin++;
210 }
211 if (*split.tz_begin == '+') {
212 sign = 1;
213 split.tz_begin++;
214 }
215
216 log->value.update.tz_offset = sign * atoi(split.tz_begin);
217}
218
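/*
 * Read a single reference from the given stack without reloading it
 * first. Symbolic refs are reported via `referent` and REF_ISSYMREF,
 * direct refs via `oid`. Returns 0 on success, a positive value if the
 * ref does not exist and a negative error code on failure.
 */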
219static int read_ref_without_reload(struct reftable_stack *stack,
220 const char *refname,
221 struct object_id *oid,
222 struct strbuf *referent,
223 unsigned int *type)
224{
225 struct reftable_ref_record ref = {0};
226 int ret;
227
228 ret = reftable_stack_read_ref(stack, refname, &ref);
229 if (ret)
230 goto done;
231
232 if (ref.value_type == REFTABLE_REF_SYMREF) {
233 strbuf_reset(referent);
234 strbuf_addstr(referent, ref.value.symref);
235 *type |= REF_ISSYMREF;
236 } else if (reftable_ref_record_val1(&ref)) {
237 oidread(oid, reftable_ref_record_val1(&ref));
238 } else {
239 /* We got a tombstone, which should not happen. */
240 BUG("unhandled reference value type %d", ref.value_type);
241 }
242
243done:
244 assert(ret != REFTABLE_API_ERROR);
245 reftable_ref_record_release(&ref);
246 return ret;
247}
248
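/*
 * Set up the ref store: initialize write options, open the main stack
 * in the common git dir and, when the store is opened via a worktree,
 * an additional per-worktree stack in that worktree's gitdir.
 */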
249static struct ref_store *reftable_be_init(struct repository *repo,
250 const char *gitdir,
251 unsigned int store_flags)
252{
253 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
254 struct strbuf path = STRBUF_INIT;
255 int is_worktree;
256 mode_t mask;
257
258 mask = umask(0);
259 umask(mask);
260
261 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
262 strmap_init(&refs->worktree_stacks);
263 refs->store_flags = store_flags;
264 refs->write_options.block_size = 4096;
265 refs->write_options.hash_id = repo->hash_algo->format_id;
266 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
267
268 /*
269 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
270 * This stack contains both the shared and the main worktree refs.
271 *
272 * Note that we don't try to resolve the path in case we have a
273 * worktree because `get_common_dir_noenv()` already does it for us.
274 */
275 is_worktree = get_common_dir_noenv(&path, gitdir);
276 if (!is_worktree) {
277 strbuf_reset(&path);
278 strbuf_realpath(&path, gitdir, 0);
279 }
280 strbuf_addstr(&path, "/reftable");
281 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285
286 /*
287 * If we're in a worktree we also need to set up the worktree reftable
288 * stack that is contained in the per-worktree GIT_DIR.
289 *
290 * Ideally, we would also add the stack to our worktree stack map. But
291 * we have no way to figure out the worktree name here and thus can't
292 * do it efficiently.
293 */
294 if (is_worktree) {
295 strbuf_reset(&path);
296 strbuf_addf(&path, "%s/reftable", gitdir);
297
298 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
299 refs->write_options);
300 if (refs->err)
301 goto done;
302 }
303
304 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
305
306done:
307 assert(refs->err != REFTABLE_API_ERROR);
308 strbuf_release(&path);
309 return &refs->base;
310}
311
312static int reftable_be_init_db(struct ref_store *ref_store,
313 int flags UNUSED,
314 struct strbuf *err UNUSED)
315{
316 struct reftable_ref_store *refs =
317 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
318 struct strbuf sb = STRBUF_INIT;
319
320 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
321 safe_create_dir(sb.buf, 1);
322 strbuf_reset(&sb);
323
324 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
325 write_file(sb.buf, "ref: refs/heads/.invalid");
326 adjust_shared_perm(sb.buf);
327 strbuf_reset(&sb);
328
329 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
330 safe_create_dir(sb.buf, 1);
331 strbuf_reset(&sb);
332
333 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
334 write_file(sb.buf, "this repository uses the reftable format");
335 adjust_shared_perm(sb.buf);
336
337 strbuf_release(&sb);
338 return 0;
339}
340
341struct reftable_ref_iterator {
342 struct ref_iterator base;
343 struct reftable_ref_store *refs;
344 struct reftable_iterator iter;
345 struct reftable_ref_record ref;
346 struct object_id oid;
347
348 const char *prefix;
349 unsigned int flags;
350 int err;
351};
352
353static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
354{
355 struct reftable_ref_iterator *iter =
356 (struct reftable_ref_iterator *)ref_iterator;
357 struct reftable_ref_store *refs = iter->refs;
358
359 while (!iter->err) {
360 int flags = 0;
361
362 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
363 if (iter->err)
364 break;
365
366 /*
367 * The files backend only lists references contained in "refs/" unless
368 * the root refs are to be included. We emulate the same behaviour here.
 369 */
370 if (!starts_with(iter->ref.refname, "refs/") &&
371 !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
372 (is_pseudoref(&iter->refs->base, iter->ref.refname) ||
373 is_headref(&iter->refs->base, iter->ref.refname)))) {
 374 continue;
 375 }
376
377 if (iter->prefix &&
378 strncmp(iter->prefix, iter->ref.refname, strlen(iter->prefix))) {
379 iter->err = 1;
380 break;
381 }
382
383 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
384 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
385 REF_WORKTREE_CURRENT)
386 continue;
387
388 switch (iter->ref.value_type) {
389 case REFTABLE_REF_VAL1:
390 oidread(&iter->oid, iter->ref.value.val1);
391 break;
392 case REFTABLE_REF_VAL2:
393 oidread(&iter->oid, iter->ref.value.val2.value);
394 break;
395 case REFTABLE_REF_SYMREF:
396 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
397 RESOLVE_REF_READING, &iter->oid, &flags))
398 oidclr(&iter->oid);
399 break;
400 default:
401 BUG("unhandled reference value type %d", iter->ref.value_type);
402 }
403
404 if (is_null_oid(&iter->oid))
405 flags |= REF_ISBROKEN;
406
407 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
408 if (!refname_is_safe(iter->ref.refname))
409 die(_("refname is dangerous: %s"), iter->ref.refname);
410 oidclr(&iter->oid);
411 flags |= REF_BAD_NAME | REF_ISBROKEN;
412 }
413
414 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
415 flags & REF_ISSYMREF &&
416 flags & REF_ISBROKEN)
417 continue;
418
419 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
420 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
421 &iter->oid, flags))
422 continue;
423
424 iter->base.refname = iter->ref.refname;
425 iter->base.oid = &iter->oid;
426 iter->base.flags = flags;
427
428 break;
429 }
430
431 if (iter->err > 0) {
432 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
433 return ITER_ERROR;
434 return ITER_DONE;
435 }
436
437 if (iter->err < 0) {
438 ref_iterator_abort(ref_iterator);
439 return ITER_ERROR;
440 }
441
442 return ITER_OK;
443}
444
445static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
446 struct object_id *peeled)
447{
448 struct reftable_ref_iterator *iter =
449 (struct reftable_ref_iterator *)ref_iterator;
450
451 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
452 oidread(peeled, iter->ref.value.val2.target_value);
453 return 0;
454 }
455
456 return -1;
457}
458
459static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
460{
461 struct reftable_ref_iterator *iter =
462 (struct reftable_ref_iterator *)ref_iterator;
463 reftable_ref_record_release(&iter->ref);
464 reftable_iterator_destroy(&iter->iter);
465 free(iter);
466 return ITER_DONE;
467}
468
469static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
470 .advance = reftable_ref_iterator_advance,
471 .peel = reftable_ref_iterator_peel,
472 .abort = reftable_ref_iterator_abort
473};
474
475static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
476 struct reftable_stack *stack,
477 const char *prefix,
478 int flags)
479{
480 struct reftable_merged_table *merged_table;
481 struct reftable_ref_iterator *iter;
482 int ret;
483
484 iter = xcalloc(1, sizeof(*iter));
 485 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
486 iter->prefix = prefix;
487 iter->base.oid = &iter->oid;
488 iter->flags = flags;
489 iter->refs = refs;
490
491 ret = refs->err;
492 if (ret)
493 goto done;
494
495 ret = reftable_stack_reload(stack);
496 if (ret)
497 goto done;
498
499 merged_table = reftable_stack_merged_table(stack);
500
501 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
502 if (ret)
503 goto done;
504
505done:
506 iter->err = ret;
507 return iter;
508}
509
510static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
511 const char *prefix,
512 const char **exclude_patterns,
513 unsigned int flags)
514{
515 struct reftable_ref_iterator *main_iter, *worktree_iter;
516 struct reftable_ref_store *refs;
517 unsigned int required_flags = REF_STORE_READ;
518
519 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
520 required_flags |= REF_STORE_ODB;
521 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
522
523 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
524
525 /*
526 * The worktree stack is only set when we're in an actual worktree
 527 * right now. If we aren't, then we only return the common reftable
 528 * iterator.
529 */
530 if (!refs->worktree_stack)
531 return &main_iter->base;
532
533 /*
534 * Otherwise we merge both the common and the per-worktree refs into a
535 * single iterator.
536 */
537 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
 538 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
 539 ref_iterator_select, NULL);
540}
541
542static int reftable_be_read_raw_ref(struct ref_store *ref_store,
543 const char *refname,
544 struct object_id *oid,
545 struct strbuf *referent,
546 unsigned int *type,
547 int *failure_errno)
548{
549 struct reftable_ref_store *refs =
550 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
551 struct reftable_stack *stack = stack_for(refs, refname, &refname);
552 int ret;
553
554 if (refs->err < 0)
555 return refs->err;
556
557 ret = reftable_stack_reload(stack);
558 if (ret)
559 return ret;
560
561 ret = read_ref_without_reload(stack, refname, oid, referent, type);
562 if (ret < 0)
563 return ret;
564 if (ret > 0) {
565 *failure_errno = ENOENT;
566 return -1;
567 }
568
569 return 0;
570}
571
572static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
573 const char *refname,
574 struct strbuf *referent)
575{
576 struct reftable_ref_store *refs =
577 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
578 struct reftable_stack *stack = stack_for(refs, refname, &refname);
579 struct reftable_ref_record ref = {0};
580 int ret;
581
582 ret = reftable_stack_reload(stack);
583 if (ret)
584 return ret;
585
586 ret = reftable_stack_read_ref(stack, refname, &ref);
587 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
588 strbuf_addstr(referent, ref.value.symref);
589 else
590 ret = -1;
591
592 reftable_ref_record_release(&ref);
593 return ret;
594}
595
596/*
 597 * Return the refname under which the update was originally requested.
598 */
599static const char *original_update_refname(struct ref_update *update)
600{
601 while (update->parent_update)
602 update = update->parent_update;
603 return update->refname;
604}
605
606struct reftable_transaction_update {
607 struct ref_update *update;
608 struct object_id current_oid;
609};
610
611struct write_transaction_table_arg {
612 struct reftable_ref_store *refs;
613 struct reftable_stack *stack;
614 struct reftable_addition *addition;
615 struct reftable_transaction_update *updates;
616 size_t updates_nr;
617 size_t updates_alloc;
618 size_t updates_expected;
619};
620
621struct reftable_transaction_data {
622 struct write_transaction_table_arg *args;
623 size_t args_nr, args_alloc;
624};
625
626static void free_transaction_data(struct reftable_transaction_data *tx_data)
627{
628 if (!tx_data)
629 return;
630 for (size_t i = 0; i < tx_data->args_nr; i++) {
631 reftable_addition_destroy(tx_data->args[i].addition);
632 free(tx_data->args[i].updates);
633 }
634 free(tx_data->args);
635 free(tx_data);
636}
637
638/*
639 * Prepare transaction update for the given reference update. This will cause
640 * us to lock the corresponding reftable stack for concurrent modification.
641 */
642static int prepare_transaction_update(struct write_transaction_table_arg **out,
643 struct reftable_ref_store *refs,
644 struct reftable_transaction_data *tx_data,
645 struct ref_update *update,
646 struct strbuf *err)
647{
648 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
649 struct write_transaction_table_arg *arg = NULL;
650 size_t i;
651 int ret;
652
653 /*
654 * Search for a preexisting stack update. If there is one then we add
655 * the update to it, otherwise we set up a new stack update.
656 */
657 for (i = 0; !arg && i < tx_data->args_nr; i++)
658 if (tx_data->args[i].stack == stack)
659 arg = &tx_data->args[i];
660
661 if (!arg) {
662 struct reftable_addition *addition;
663
664 ret = reftable_stack_reload(stack);
665 if (ret)
666 return ret;
667
668 ret = reftable_stack_new_addition(&addition, stack);
669 if (ret) {
670 if (ret == REFTABLE_LOCK_ERROR)
671 strbuf_addstr(err, "cannot lock references");
672 return ret;
673 }
674
675 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
676 tx_data->args_alloc);
677 arg = &tx_data->args[tx_data->args_nr++];
678 arg->refs = refs;
679 arg->stack = stack;
680 arg->addition = addition;
681 arg->updates = NULL;
682 arg->updates_nr = 0;
683 arg->updates_alloc = 0;
684 arg->updates_expected = 0;
685 }
686
687 arg->updates_expected++;
688
689 if (out)
690 *out = arg;
691
692 return 0;
693}
694
695/*
696 * Queue a reference update for the correct stack. We potentially need to
697 * handle multiple stack updates in a single transaction when it spans across
698 * multiple worktrees.
699 */
700static int queue_transaction_update(struct reftable_ref_store *refs,
701 struct reftable_transaction_data *tx_data,
702 struct ref_update *update,
703 struct object_id *current_oid,
704 struct strbuf *err)
705{
706 struct write_transaction_table_arg *arg = NULL;
707 int ret;
708
709 if (update->backend_data)
710 BUG("reference update queued more than once");
711
712 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
713 if (ret < 0)
714 return ret;
715
716 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
717 arg->updates_alloc);
718 arg->updates[arg->updates_nr].update = update;
719 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
720 update->backend_data = &arg->updates[arg->updates_nr++];
721
722 return 0;
723}
724
725static int reftable_be_transaction_prepare(struct ref_store *ref_store,
726 struct ref_transaction *transaction,
727 struct strbuf *err)
728{
729 struct reftable_ref_store *refs =
730 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
731 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
732 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
733 struct reftable_transaction_data *tx_data = NULL;
734 struct object_id head_oid;
735 unsigned int head_type = 0;
736 size_t i;
737 int ret;
738
739 ret = refs->err;
740 if (ret < 0)
741 goto done;
742
743 tx_data = xcalloc(1, sizeof(*tx_data));
744
745 /*
746 * Preprocess all updates. For one we check that there are no duplicate
747 * reference updates in this transaction. Second, we lock all stacks
748 * that will be modified during the transaction.
749 */
750 for (i = 0; i < transaction->nr; i++) {
751 ret = prepare_transaction_update(NULL, refs, tx_data,
752 transaction->updates[i], err);
753 if (ret)
754 goto done;
755
756 string_list_append(&affected_refnames,
757 transaction->updates[i]->refname);
758 }
759
760 /*
761 * Now that we have counted updates per stack we can preallocate their
762 * arrays. This avoids having to reallocate many times.
763 */
764 for (i = 0; i < tx_data->args_nr; i++) {
765 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
766 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
767 }
768
769 /*
770 * Fail if a refname appears more than once in the transaction.
771 * This code is taken from the files backend and is a good candidate to
772 * be moved into the generic layer.
773 */
774 string_list_sort(&affected_refnames);
775 if (ref_update_reject_duplicates(&affected_refnames, err)) {
776 ret = TRANSACTION_GENERIC_ERROR;
777 goto done;
778 }
779
780 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
781 &head_referent, &head_type);
782 if (ret < 0)
783 goto done;
 784 ret = 0;
785
786 for (i = 0; i < transaction->nr; i++) {
787 struct ref_update *u = transaction->updates[i];
788 struct object_id current_oid = {0};
789 struct reftable_stack *stack;
790 const char *rewritten_ref;
791
792 stack = stack_for(refs, u->refname, &rewritten_ref);
793
794 /* Verify that the new object ID is valid. */
795 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
796 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
797 !(u->flags & REF_LOG_ONLY)) {
798 struct object *o = parse_object(refs->base.repo, &u->new_oid);
799 if (!o) {
800 strbuf_addf(err,
801 _("trying to write ref '%s' with nonexistent object %s"),
802 u->refname, oid_to_hex(&u->new_oid));
803 ret = -1;
804 goto done;
805 }
806
807 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
808 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
809 oid_to_hex(&u->new_oid), u->refname);
810 ret = -1;
811 goto done;
812 }
813 }
814
815 /*
816 * When we update the reference that HEAD points to we enqueue
817 * a second log-only update for HEAD so that its reflog is
818 * updated accordingly.
819 */
820 if (head_type == REF_ISSYMREF &&
821 !(u->flags & REF_LOG_ONLY) &&
822 !(u->flags & REF_UPDATE_VIA_HEAD) &&
823 !strcmp(rewritten_ref, head_referent.buf)) {
824 struct ref_update *new_update;
825
826 /*
827 * First make sure that HEAD is not already in the
828 * transaction. This check is O(lg N) in the transaction
829 * size, but it happens at most once per transaction.
830 */
831 if (string_list_has_string(&affected_refnames, "HEAD")) {
832 /* An entry already existed */
833 strbuf_addf(err,
834 _("multiple updates for 'HEAD' (including one "
835 "via its referent '%s') are not allowed"),
836 u->refname);
837 ret = TRANSACTION_NAME_CONFLICT;
838 goto done;
839 }
840
841 new_update = ref_transaction_add_update(
842 transaction, "HEAD",
843 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
844 &u->new_oid, &u->old_oid, u->msg);
845 string_list_insert(&affected_refnames, new_update->refname);
846 }
847
848 ret = read_ref_without_reload(stack, rewritten_ref,
849 &current_oid, &referent, &u->type);
850 if (ret < 0)
851 goto done;
852 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
853 /*
854 * The reference does not exist, and we either have no
855 * old object ID or expect the reference to not exist.
856 * We can thus skip below safety checks as well as the
857 * symref splitting. But we do want to verify that
858 * there is no conflicting reference here so that we
859 * can output a proper error message instead of failing
860 * at a later point.
861 */
862 ret = refs_verify_refname_available(ref_store, u->refname,
863 &affected_refnames, NULL, err);
864 if (ret < 0)
865 goto done;
866
867 /*
868 * There is no need to write the reference deletion
869 * when the reference in question doesn't exist.
870 */
871 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
872 ret = queue_transaction_update(refs, tx_data, u,
873 &current_oid, err);
874 if (ret)
875 goto done;
876 }
877
878 continue;
879 }
880 if (ret > 0) {
881 /* The reference does not exist, but we expected it to. */
882 strbuf_addf(err, _("cannot lock ref '%s': "
883 "unable to resolve reference '%s'"),
884 original_update_refname(u), u->refname);
885 ret = -1;
886 goto done;
887 }
888
889 if (u->type & REF_ISSYMREF) {
890 /*
891 * The reftable stack is locked at this point already,
892 * so it is safe to call `refs_resolve_ref_unsafe()`
893 * here without causing races.
894 */
895 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
896 &current_oid, NULL);
897
898 if (u->flags & REF_NO_DEREF) {
899 if (u->flags & REF_HAVE_OLD && !resolved) {
900 strbuf_addf(err, _("cannot lock ref '%s': "
901 "error reading reference"), u->refname);
902 ret = -1;
903 goto done;
904 }
905 } else {
906 struct ref_update *new_update;
907 int new_flags;
908
909 new_flags = u->flags;
910 if (!strcmp(rewritten_ref, "HEAD"))
911 new_flags |= REF_UPDATE_VIA_HEAD;
912
913 /*
914 * If we are updating a symref (eg. HEAD), we should also
915 * update the branch that the symref points to.
916 *
917 * This is generic functionality, and would be better
918 * done in refs.c, but the current implementation is
919 * intertwined with the locking in files-backend.c.
920 */
921 new_update = ref_transaction_add_update(
922 transaction, referent.buf, new_flags,
923 &u->new_oid, &u->old_oid, u->msg);
924 new_update->parent_update = u;
925
926 /*
927 * Change the symbolic ref update to log only. Also, it
928 * doesn't need to check its old OID value, as that will be
929 * done when new_update is processed.
930 */
931 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
932 u->flags &= ~REF_HAVE_OLD;
933
934 if (string_list_has_string(&affected_refnames, new_update->refname)) {
935 strbuf_addf(err,
936 _("multiple updates for '%s' (including one "
937 "via symref '%s') are not allowed"),
938 referent.buf, u->refname);
939 ret = TRANSACTION_NAME_CONFLICT;
940 goto done;
941 }
942 string_list_insert(&affected_refnames, new_update->refname);
943 }
944 }
945
946 /*
947 * Verify that the old object matches our expectations. Note
948 * that the error messages here do not make a lot of sense in
949 * the context of the reftable backend as we never lock
950 * individual refs. But the error messages match what the files
951 * backend returns, which keeps our tests happy.
952 */
953 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
954 if (is_null_oid(&u->old_oid))
955 strbuf_addf(err, _("cannot lock ref '%s': "
956 "reference already exists"),
957 original_update_refname(u));
958 else if (is_null_oid(&current_oid))
959 strbuf_addf(err, _("cannot lock ref '%s': "
960 "reference is missing but expected %s"),
961 original_update_refname(u),
962 oid_to_hex(&u->old_oid));
963 else
964 strbuf_addf(err, _("cannot lock ref '%s': "
965 "is at %s but expected %s"),
966 original_update_refname(u),
967 oid_to_hex(&current_oid),
968 oid_to_hex(&u->old_oid));
969 ret = -1;
970 goto done;
971 }
972
973 /*
974 * If all of the following conditions are true:
975 *
976 * - We're not about to write a symref.
977 * - We're not about to write a log-only entry.
 978 * - Old and new object ID are the same.
979 *
980 * Then we're essentially doing a no-op update that can be
981 * skipped. This is not only for the sake of efficiency, but
982 * also skips writing unneeded reflog entries.
983 */
984 if ((u->type & REF_ISSYMREF) ||
985 (u->flags & REF_LOG_ONLY) ||
986 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
987 ret = queue_transaction_update(refs, tx_data, u,
988 &current_oid, err);
989 if (ret)
990 goto done;
991 }
992 }
993
994 transaction->backend_data = tx_data;
995 transaction->state = REF_TRANSACTION_PREPARED;
996
997done:
998 assert(ret != REFTABLE_API_ERROR);
999 if (ret < 0) {
1000 free_transaction_data(tx_data);
1001 transaction->state = REF_TRANSACTION_CLOSED;
1002 if (!err->len)
1003 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1004 reftable_error_str(ret));
1005 }
1006 string_list_clear(&affected_refnames, 0);
1007 strbuf_release(&referent);
1008 strbuf_release(&head_referent);
1009
1010 return ret;
1011}
1012
1013static int reftable_be_transaction_abort(struct ref_store *ref_store,
1014 struct ref_transaction *transaction,
1015 struct strbuf *err)
1016{
1017 struct reftable_transaction_data *tx_data = transaction->backend_data;
1018 free_transaction_data(tx_data);
1019 transaction->state = REF_TRANSACTION_CLOSED;
1020 return 0;
1021}
1022
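/*
 * Comparison function used to sort queued updates by refname before
 * they are written to the new table.
 */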
1023static int transaction_update_cmp(const void *a, const void *b)
1024{
1025 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1026 ((struct reftable_transaction_update *)b)->update->refname);
1027}
1028
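/*
 * Callback invoked via reftable_addition_add(): writes all queued
 * updates of a single stack into one new table, including any reflog
 * additions and deletions.
 */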
1029static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1030{
1031 struct write_transaction_table_arg *arg = cb_data;
1032 struct reftable_merged_table *mt =
1033 reftable_stack_merged_table(arg->stack);
1034 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1035 struct reftable_log_record *logs = NULL;
1036 size_t logs_nr = 0, logs_alloc = 0, i;
1037 int ret = 0;
1038
1039 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1040
1041 reftable_writer_set_limits(writer, ts, ts);
1042
1043 for (i = 0; i < arg->updates_nr; i++) {
1044 struct reftable_transaction_update *tx_update = &arg->updates[i];
1045 struct ref_update *u = tx_update->update;
1046
1047 /*
1048 * Write a reflog entry when updating a ref to point to
 1049 * something new in any of the following cases:
1050 *
1051 * - The reference is about to be deleted. We always want to
1052 * delete the reflog in that case.
1053 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1054 * the reflog entry.
1055 * - `core.logAllRefUpdates` tells us to create the reflog for
1056 * the given ref.
1057 */
1058 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1059 struct reftable_log_record log = {0};
1060 struct reftable_iterator it = {0};
1061
1062 /*
1063 * When deleting refs we also delete all reflog entries
1064 * with them. While it is not strictly required to
1065 * delete reflogs together with their refs, this
1066 * matches the behaviour of the files backend.
1067 *
1068 * Unfortunately, we have no better way than to delete
1069 * all reflog entries one by one.
1070 */
1071 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1072 while (ret == 0) {
1073 struct reftable_log_record *tombstone;
1074
1075 ret = reftable_iterator_next_log(&it, &log);
1076 if (ret < 0)
1077 break;
1078 if (ret > 0 || strcmp(log.refname, u->refname)) {
1079 ret = 0;
1080 break;
1081 }
1082
1083 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1084 tombstone = &logs[logs_nr++];
1085 tombstone->refname = xstrdup(u->refname);
1086 tombstone->value_type = REFTABLE_LOG_DELETION;
1087 tombstone->update_index = log.update_index;
1088 }
1089
1090 reftable_log_record_release(&log);
1091 reftable_iterator_destroy(&it);
1092
1093 if (ret)
1094 goto done;
1095 } else if (u->flags & REF_HAVE_NEW &&
1096 (u->flags & REF_FORCE_CREATE_REFLOG ||
1097 should_write_log(&arg->refs->base, u->refname))) {
1098 struct reftable_log_record *log;
1099
1100 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1101 log = &logs[logs_nr++];
1102 memset(log, 0, sizeof(*log));
1103
1104 fill_reftable_log_record(log);
1105 log->update_index = ts;
1106 log->refname = xstrdup(u->refname);
1107 log->value.update.new_hash = u->new_oid.hash;
1108 log->value.update.old_hash = tx_update->current_oid.hash;
1109 log->value.update.message =
1110 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1111 }
1112
1113 if (u->flags & REF_LOG_ONLY)
1114 continue;
1115
1116 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1117 struct reftable_ref_record ref = {
1118 .refname = (char *)u->refname,
1119 .update_index = ts,
1120 .value_type = REFTABLE_REF_DELETION,
1121 };
1122
1123 ret = reftable_writer_add_ref(writer, &ref);
1124 if (ret < 0)
1125 goto done;
1126 } else if (u->flags & REF_HAVE_NEW) {
1127 struct reftable_ref_record ref = {0};
1128 struct object_id peeled;
1129 int peel_error;
1130
1131 ref.refname = (char *)u->refname;
1132 ref.update_index = ts;
1133
1134 peel_error = peel_object(&u->new_oid, &peeled);
1135 if (!peel_error) {
1136 ref.value_type = REFTABLE_REF_VAL2;
1137 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1138 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1139 } else if (!is_null_oid(&u->new_oid)) {
1140 ref.value_type = REFTABLE_REF_VAL1;
1141 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1142 }
1143
1144 ret = reftable_writer_add_ref(writer, &ref);
1145 if (ret < 0)
1146 goto done;
1147 }
1148 }
1149
1150 /*
1151 * Logs are written at the end so that we do not have intermixed ref
1152 * and log blocks.
1153 */
1154 if (logs) {
1155 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1156 if (ret < 0)
1157 goto done;
1158 }
1159
1160done:
1161 assert(ret != REFTABLE_API_ERROR);
1162 for (i = 0; i < logs_nr; i++)
1163 clear_reftable_log_record(&logs[i]);
1164 free(logs);
1165 return ret;
1166}
1167
1168static int reftable_be_transaction_finish(struct ref_store *ref_store,
1169 struct ref_transaction *transaction,
1170 struct strbuf *err)
1171{
1172 struct reftable_transaction_data *tx_data = transaction->backend_data;
1173 int ret = 0;
1174
1175 for (size_t i = 0; i < tx_data->args_nr; i++) {
1176 ret = reftable_addition_add(tx_data->args[i].addition,
1177 write_transaction_table, &tx_data->args[i]);
1178 if (ret < 0)
1179 goto done;
1180
1181 ret = reftable_addition_commit(tx_data->args[i].addition);
1182 if (ret < 0)
1183 goto done;
1184 }
1185
1186done:
1187 assert(ret != REFTABLE_API_ERROR);
1188 free_transaction_data(tx_data);
1189 transaction->state = REF_TRANSACTION_CLOSED;
1190
1191 if (ret) {
1192 strbuf_addf(err, _("reftable: transaction failure: %s"),
1193 reftable_error_str(ret));
1194 return -1;
1195 }
1196 return ret;
1197}
1198
1199static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1200 struct ref_transaction *transaction,
1201 struct strbuf *err)
1202{
1203 return ref_transaction_commit(transaction, err);
1204}
1205
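/*
 * For the reftable backend, packing refs means compacting the stack
 * into a single table and removing no-longer-needed files afterwards.
 */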
1206static int reftable_be_pack_refs(struct ref_store *ref_store,
1207 struct pack_refs_opts *opts)
1208{
1209 struct reftable_ref_store *refs =
1210 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1211 struct reftable_stack *stack;
1212 int ret;
1213
1214 if (refs->err)
1215 return refs->err;
1216
1217 stack = refs->worktree_stack;
1218 if (!stack)
1219 stack = refs->main_stack;
1220
1221 ret = reftable_stack_compact_all(stack, NULL);
1222 if (ret)
1223 goto out;
1224 ret = reftable_stack_clean(stack);
1225 if (ret)
1226 goto out;
1227
1228out:
1229 return ret;
1230}
1231
1232struct write_create_symref_arg {
1233 struct reftable_ref_store *refs;
1234 struct reftable_stack *stack;
1235 const char *refname;
1236 const char *target;
1237 const char *logmsg;
1238};
1239
1240static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1241{
1242 struct write_create_symref_arg *create = cb_data;
1243 uint64_t ts = reftable_stack_next_update_index(create->stack);
1244 struct reftable_ref_record ref = {
1245 .refname = (char *)create->refname,
1246 .value_type = REFTABLE_REF_SYMREF,
1247 .value.symref = (char *)create->target,
1248 .update_index = ts,
1249 };
1250 struct reftable_log_record log = {0};
1251 struct object_id new_oid;
1252 struct object_id old_oid;
1253 int ret;
1254
1255 reftable_writer_set_limits(writer, ts, ts);
1256
1257 ret = reftable_writer_add_ref(writer, &ref);
1258 if (ret)
1259 return ret;
1260
1261 /*
1262 * Note that it is important to try and resolve the reference before we
1263 * write the log entry. This is because `should_write_log()` will munge
1264 * `core.logAllRefUpdates`, which is undesirable when we create a new
1265 * repository because it would be written into the config. As HEAD will
1266 * not resolve for new repositories this ordering will ensure that this
1267 * never happens.
1268 */
1269 if (!create->logmsg ||
1270 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1271 RESOLVE_REF_READING, &new_oid, NULL) ||
1272 !should_write_log(&create->refs->base, create->refname))
1273 return 0;
1274
1275 fill_reftable_log_record(&log);
1276 log.refname = xstrdup(create->refname);
1277 log.update_index = ts;
1278 log.value.update.message = xstrndup(create->logmsg,
1279 create->refs->write_options.block_size / 2);
1280 log.value.update.new_hash = new_oid.hash;
1281 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1282 RESOLVE_REF_READING, &old_oid, NULL))
1283 log.value.update.old_hash = old_oid.hash;
1284
1285 ret = reftable_writer_add_log(writer, &log);
1286 clear_reftable_log_record(&log);
1287 return ret;
1288}
1289
1290static int reftable_be_create_symref(struct ref_store *ref_store,
1291 const char *refname,
1292 const char *target,
1293 const char *logmsg)
1294{
1295 struct reftable_ref_store *refs =
1296 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1297 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1298 struct write_create_symref_arg arg = {
1299 .refs = refs,
1300 .stack = stack,
1301 .refname = refname,
1302 .target = target,
1303 .logmsg = logmsg,
1304 };
1305 int ret;
1306
1307 ret = refs->err;
1308 if (ret < 0)
1309 goto done;
1310
1311 ret = reftable_stack_reload(stack);
1312 if (ret)
1313 goto done;
1314
1315 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1316
1317done:
1318 assert(ret != REFTABLE_API_ERROR);
1319 if (ret)
1320 error("unable to write symref for %s: %s", refname,
1321 reftable_error_str(ret));
1322 return ret;
1323}
1324
1325struct write_copy_arg {
1326 struct reftable_ref_store *refs;
1327 struct reftable_stack *stack;
1328 const char *oldname;
1329 const char *newname;
1330 const char *logmsg;
1331 int delete_old;
1332};
1333
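/*
 * Callback that copies a reference to its new name, carries over its
 * reflog entries and, when `delete_old` is set, also deletes the old
 * reference and its reflog.
 */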
1334static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1335{
1336 struct write_copy_arg *arg = cb_data;
1337 uint64_t deletion_ts, creation_ts;
1338 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1339 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1340 struct reftable_log_record old_log = {0}, *logs = NULL;
1341 struct reftable_iterator it = {0};
1342 struct string_list skip = STRING_LIST_INIT_NODUP;
1343 struct strbuf errbuf = STRBUF_INIT;
1344 size_t logs_nr = 0, logs_alloc = 0, i;
1345 int ret;
1346
1347 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1348 ret = error(_("refname %s not found"), arg->oldname);
1349 goto done;
1350 }
1351 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1352 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1353 arg->oldname);
1354 goto done;
1355 }
1356
1357 /*
1358 * There's nothing to do in case the old and new name are the same, so
1359 * we exit early in that case.
1360 */
1361 if (!strcmp(arg->oldname, arg->newname)) {
1362 ret = 0;
1363 goto done;
1364 }
1365
1366 /*
1367 * Verify that the new refname is available.
1368 */
1369 string_list_insert(&skip, arg->oldname);
1370 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1371 NULL, &skip, &errbuf);
1372 if (ret < 0) {
1373 error("%s", errbuf.buf);
1374 goto done;
1375 }
1376
1377 /*
1378 * When deleting the old reference we have to use two update indices:
1379 * once to delete the old ref and its reflog, and once to create the
1380 * new ref and its reflog. They need to be staged with two separate
1381 * indices because the new reflog needs to encode both the deletion of
1382 * the old branch and the creation of the new branch, and we cannot do
1383 * two changes to a reflog in a single update.
1384 */
1385 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1386 if (arg->delete_old)
1387 creation_ts++;
1388 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1389
1390 /*
1391 * Add the new reference. If this is a rename then we also delete the
1392 * old reference.
1393 */
1394 refs[0] = old_ref;
1395 refs[0].refname = (char *)arg->newname;
1396 refs[0].update_index = creation_ts;
1397 if (arg->delete_old) {
1398 refs[1].refname = (char *)arg->oldname;
1399 refs[1].value_type = REFTABLE_REF_DELETION;
1400 refs[1].update_index = deletion_ts;
1401 }
1402 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1403 if (ret < 0)
1404 goto done;
1405
1406 /*
1407 * When deleting the old branch we need to create a reflog entry on the
1408 * new branch name that indicates that the old branch has been deleted
1409 * and then recreated. This is a tad weird, but matches what the files
1410 * backend does.
1411 */
1412 if (arg->delete_old) {
1413 struct strbuf head_referent = STRBUF_INIT;
1414 struct object_id head_oid;
1415 int append_head_reflog;
1416 unsigned head_type = 0;
1417
1418 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1419 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1420 fill_reftable_log_record(&logs[logs_nr]);
1421 logs[logs_nr].refname = (char *)arg->newname;
1422 logs[logs_nr].update_index = deletion_ts;
1423 logs[logs_nr].value.update.message =
1424 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1425 logs[logs_nr].value.update.old_hash = old_ref.value.val1;
1426 logs_nr++;
1427
1428 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1429 if (ret < 0)
1430 goto done;
1431 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1432 strbuf_release(&head_referent);
1433
1434 /*
1435 * The files backend uses `refs_delete_ref()` to delete the old
1436 * branch name, which will append a reflog entry for HEAD in
1437 * case it points to the old branch.
1438 */
1439 if (append_head_reflog) {
1440 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1441 logs[logs_nr] = logs[logs_nr - 1];
1442 logs[logs_nr].refname = "HEAD";
1443 logs_nr++;
1444 }
1445 }
1446
1447 /*
1448 * Create the reflog entry for the newly created branch.
1449 */
1450 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1451 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1452 fill_reftable_log_record(&logs[logs_nr]);
1453 logs[logs_nr].refname = (char *)arg->newname;
1454 logs[logs_nr].update_index = creation_ts;
1455 logs[logs_nr].value.update.message =
1456 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1457 logs[logs_nr].value.update.new_hash = old_ref.value.val1;
1458 logs_nr++;
1459
1460 /*
1461 * In addition to writing the reflog entry for the new branch, we also
1462 * copy over all log entries from the old reflog. Last but not least,
1463 * when renaming we also have to delete all the old reflog entries.
1464 */
1465 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1466 if (ret < 0)
 1467 goto done;
1468
1469 while (1) {
1470 ret = reftable_iterator_next_log(&it, &old_log);
1471 if (ret < 0)
1472 goto done;
1473 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1474 ret = 0;
1475 break;
1476 }
1477
1478 free(old_log.refname);
1479
1480 /*
1481 * Copy over the old reflog entry with the new refname.
1482 */
1483 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1484 logs[logs_nr] = old_log;
1485 logs[logs_nr].refname = (char *)arg->newname;
1486 logs_nr++;
1487
1488 /*
1489 * Delete the old reflog entry in case we are renaming.
1490 */
1491 if (arg->delete_old) {
1492 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1493 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1494 logs[logs_nr].refname = (char *)arg->oldname;
1495 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1496 logs[logs_nr].update_index = old_log.update_index;
1497 logs_nr++;
1498 }
1499
1500 /*
1501 * Transfer ownership of the log record we're iterating over to
1502 * the array of log records. Otherwise, the pointers would get
1503 * free'd or reallocated by the iterator.
1504 */
1505 memset(&old_log, 0, sizeof(old_log));
1506 }
1507
1508 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1509 if (ret < 0)
1510 goto done;
1511
1512done:
1513 assert(ret != REFTABLE_API_ERROR);
1514 reftable_iterator_destroy(&it);
1515 string_list_clear(&skip, 0);
1516 strbuf_release(&errbuf);
1517 for (i = 0; i < logs_nr; i++) {
1518 if (!strcmp(logs[i].refname, "HEAD"))
1519 continue;
1520 if (logs[i].value.update.old_hash == old_ref.value.val1)
1521 logs[i].value.update.old_hash = NULL;
1522 if (logs[i].value.update.new_hash == old_ref.value.val1)
1523 logs[i].value.update.new_hash = NULL;
1524 logs[i].refname = NULL;
1525 reftable_log_record_release(&logs[i]);
1526 }
1527 free(logs);
1528 reftable_ref_record_release(&old_ref);
1529 reftable_log_record_release(&old_log);
1530 return ret;
1531}
1532
1533static int reftable_be_rename_ref(struct ref_store *ref_store,
1534 const char *oldrefname,
1535 const char *newrefname,
1536 const char *logmsg)
1537{
1538 struct reftable_ref_store *refs =
1539 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1540 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1541 struct write_copy_arg arg = {
1542 .refs = refs,
1543 .stack = stack,
1544 .oldname = oldrefname,
1545 .newname = newrefname,
1546 .logmsg = logmsg,
1547 .delete_old = 1,
1548 };
1549 int ret;
1550
1551 ret = refs->err;
1552 if (ret < 0)
1553 goto done;
1554
1555 ret = reftable_stack_reload(stack);
1556 if (ret)
1557 goto done;
1558 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1559
1560done:
1561 assert(ret != REFTABLE_API_ERROR);
1562 return ret;
1563}
1564
1565static int reftable_be_copy_ref(struct ref_store *ref_store,
1566 const char *oldrefname,
1567 const char *newrefname,
1568 const char *logmsg)
1569{
1570 struct reftable_ref_store *refs =
1571 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1572 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1573 struct write_copy_arg arg = {
1574 .refs = refs,
1575 .stack = stack,
1576 .oldname = oldrefname,
1577 .newname = newrefname,
1578 .logmsg = logmsg,
1579 };
1580 int ret;
1581
1582 ret = refs->err;
1583 if (ret < 0)
1584 goto done;
1585
1586 ret = reftable_stack_reload(stack);
1587 if (ret)
1588 goto done;
1589 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1590
1591done:
1592 assert(ret != REFTABLE_API_ERROR);
1593 return ret;
1594}
1595
1596struct reftable_reflog_iterator {
1597 struct ref_iterator base;
1598 struct reftable_ref_store *refs;
1599 struct reftable_iterator iter;
1600 struct reftable_log_record log;
1601 char *last_name;
1602 int err;
1603};
1604
1605static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1606{
1607 struct reftable_reflog_iterator *iter =
1608 (struct reftable_reflog_iterator *)ref_iterator;
1609
1610 while (!iter->err) {
1611 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1612 if (iter->err)
1613 break;
1614
1615 /*
1616 * We want the refnames that we have reflogs for, so we skip if
1617 * we've already produced this name. This could be faster by
1618 * seeking directly to reflog@update_index==0.
1619 */
1620 if (iter->last_name && !strcmp(iter->log.refname, iter->last_name))
1621 continue;
1622
1623 if (check_refname_format(iter->log.refname,
1624 REFNAME_ALLOW_ONELEVEL))
 1625 continue;
1626
1627 free(iter->last_name);
1628 iter->last_name = xstrdup(iter->log.refname);
1629 iter->base.refname = iter->log.refname;
1630
1631 break;
1632 }
1633
1634 if (iter->err > 0) {
1635 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1636 return ITER_ERROR;
1637 return ITER_DONE;
1638 }
1639
1640 if (iter->err < 0) {
1641 ref_iterator_abort(ref_iterator);
1642 return ITER_ERROR;
1643 }
1644
1645 return ITER_OK;
1646}
1647
1648static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1649 struct object_id *peeled)
1650{
1651 BUG("reftable reflog iterator cannot be peeled");
1652 return -1;
1653}
1654
1655static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1656{
1657 struct reftable_reflog_iterator *iter =
1658 (struct reftable_reflog_iterator *)ref_iterator;
1659 reftable_log_record_release(&iter->log);
1660 reftable_iterator_destroy(&iter->iter);
1661 free(iter->last_name);
1662 free(iter);
1663 return ITER_DONE;
1664}
1665
1666static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1667 .advance = reftable_reflog_iterator_advance,
1668 .peel = reftable_reflog_iterator_peel,
1669 .abort = reftable_reflog_iterator_abort
1670};
1671
1672static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1673 struct reftable_stack *stack)
1674{
1675 struct reftable_merged_table *merged_table;
1676 struct reftable_reflog_iterator *iter;
1677 int ret;
1678
1679 iter = xcalloc(1, sizeof(*iter));
 1680 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
 1681 iter->refs = refs;
1682
1683 ret = refs->err;
1684 if (ret)
1685 goto done;
1686
1687 ret = reftable_stack_reload(refs->main_stack);
1688 if (ret < 0)
1689 goto done;
1690
1691 merged_table = reftable_stack_merged_table(stack);
1692
1693 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1694 if (ret < 0)
1695 goto done;
1696
1697done:
1698 iter->err = ret;
1699 return iter;
1700}
1701
1702static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1703{
1704 struct reftable_ref_store *refs =
1705 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1706 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1707
1708 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1709 if (!refs->worktree_stack)
1710 return &main_iter->base;
1711
1712 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1713
 1714 return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
 1715 ref_iterator_select, NULL);
1716}
1717
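/*
 * Convert a reftable log record into the arguments expected by
 * each_reflog_ent_fn. Records that only mark reflog existence (both
 * object IDs null) are skipped.
 */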
1718static int yield_log_record(struct reftable_log_record *log,
1719 each_reflog_ent_fn fn,
1720 void *cb_data)
1721{
1722 struct object_id old_oid, new_oid;
1723 const char *full_committer;
1724
1725 oidread(&old_oid, log->value.update.old_hash);
1726 oidread(&new_oid, log->value.update.new_hash);
1727
1728 /*
1729 * When both the old object ID and the new object ID are null
1730 * then this is the reflog existence marker. The caller must
1731 * not be aware of it.
1732 */
1733 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1734 return 0;
1735
1736 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1737 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1738 return fn(&old_oid, &new_oid, full_committer,
1739 log->value.update.time, log->value.update.tz_offset,
1740 log->value.update.message, cb_data);
1741}
1742
1743static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1744 const char *refname,
1745 each_reflog_ent_fn fn,
1746 void *cb_data)
1747{
1748 struct reftable_ref_store *refs =
1749 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1750 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1751 struct reftable_merged_table *mt = NULL;
1752 struct reftable_log_record log = {0};
1753 struct reftable_iterator it = {0};
1754 int ret;
1755
1756 if (refs->err < 0)
1757 return refs->err;
1758
1759 mt = reftable_stack_merged_table(stack);
1760 ret = reftable_merged_table_seek_log(mt, &it, refname);
1761 while (!ret) {
1762 ret = reftable_iterator_next_log(&it, &log);
1763 if (ret < 0)
1764 break;
1765 if (ret > 0 || strcmp(log.refname, refname)) {
1766 ret = 0;
1767 break;
1768 }
1769
1770 ret = yield_log_record(&log, fn, cb_data);
1771 if (ret)
1772 break;
1773 }
1774
1775 reftable_log_record_release(&log);
1776 reftable_iterator_destroy(&it);
1777 return ret;
1778}
1779
1780static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1781 const char *refname,
1782 each_reflog_ent_fn fn,
1783 void *cb_data)
1784{
1785 struct reftable_ref_store *refs =
1786 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1787 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1788 struct reftable_merged_table *mt = NULL;
1789 struct reftable_log_record *logs = NULL;
1790 struct reftable_iterator it = {0};
1791 size_t logs_alloc = 0, logs_nr = 0, i;
1792 int ret;
1793
1794 if (refs->err < 0)
1795 return refs->err;
1796
1797 mt = reftable_stack_merged_table(stack);
1798 ret = reftable_merged_table_seek_log(mt, &it, refname);
1799 while (!ret) {
1800 struct reftable_log_record log = {0};
1801
1802 ret = reftable_iterator_next_log(&it, &log);
1803 if (ret < 0)
1804 goto done;
1805 if (ret > 0 || strcmp(log.refname, refname)) {
1806 reftable_log_record_release(&log);
1807 ret = 0;
1808 break;
1809 }
1810
1811 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1812 logs[logs_nr++] = log;
1813 }
1814
1815 for (i = logs_nr; i--;) {
1816 ret = yield_log_record(&logs[i], fn, cb_data);
1817 if (ret)
1818 goto done;
1819 }
1820
1821done:
1822 reftable_iterator_destroy(&it);
1823 for (i = 0; i < logs_nr; i++)
1824 reftable_log_record_release(&logs[i]);
1825 free(logs);
1826 return ret;
1827}
1828
static int reftable_be_reflog_exists(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	ret = reftable_merged_table_seek_log(mt, &it, refname);
	if (ret < 0)
		goto done;

	/*
	 * Check whether we get at least one log record for the given ref name.
	 * If so, the reflog exists, otherwise it doesn't.
	 */
	ret = reftable_iterator_next_log(&it, &log);
	if (ret < 0)
		goto done;
	if (ret > 0) {
		ret = 0;
		goto done;
	}

	ret = strcmp(log.refname, refname) == 0;

done:
	reftable_iterator_destroy(&it);
	reftable_log_record_release(&log);
	if (ret < 0)
		ret = 0;
	return ret;
}

struct write_reflog_existence_arg {
	struct reftable_ref_store *refs;
	const char *refname;
	struct reftable_stack *stack;
};

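/*
 * Table writer callback that adds the reflog existence marker for a ref,
 * that is a log record whose old and new object IDs are both null. Nothing
 * is written when the ref already has log records or when reading them
 * fails.
 */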
static int write_reflog_existence_table(struct reftable_writer *writer,
					void *cb_data)
{
	struct write_reflog_existence_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record log = {0};
	int ret;

	ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
	if (ret <= 0)
		goto done;

	reftable_writer_set_limits(writer, ts, ts);

	/*
	 * The existence entry has both old and new object ID set to the
	 * null object ID. Our iterators are aware of this and will not
	 * present it to their callers.
	 */
	log.refname = xstrdup(arg->refname);
	log.update_index = ts;
	log.value_type = REFTABLE_LOG_UPDATE;
	ret = reftable_writer_add_log(writer, &log);

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_log_record_release(&log);
	return ret;
}

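/*
 * Create a reflog for the given ref by adding a new table that carries the
 * reflog existence marker.
 */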
static int reftable_be_create_reflog(struct ref_store *ref_store,
				     const char *refname,
				     struct strbuf *errmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_existence_arg arg = {
		.refs = refs,
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);

done:
	return ret;
}

struct write_reflog_delete_arg {
	struct reftable_stack *stack;
	const char *refname;
};

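/*
 * Table writer callback that writes a tombstone for every log record of the
 * given ref, which effectively deletes its reflog.
 */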
static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_reflog_delete_arg *arg = cb_data;
	struct reftable_merged_table *mt =
		reftable_stack_merged_table(arg->stack);
	struct reftable_log_record log = {0}, tombstone = {0};
	struct reftable_iterator it = {0};
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	int ret;

	reftable_writer_set_limits(writer, ts, ts);

	/*
	 * In order to delete a reflog we need to delete all of its entries
	 * one by one. This is inefficient, but the reftable format does not
	 * have a better marker right now.
	 */
	ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
	while (ret == 0) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, arg->refname)) {
			ret = 0;
			break;
		}

		tombstone.refname = (char *)arg->refname;
		tombstone.value_type = REFTABLE_LOG_DELETION;
		tombstone.update_index = log.update_index;

		ret = reftable_writer_add_log(writer, &tombstone);
	}

	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}

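/*
 * Delete the reflog of the given ref by adding a new table that tombstones
 * all of its log records.
 */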
static int reftable_be_delete_reflog(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_delete_arg arg = {
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;
	ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);

	assert(ret != REFTABLE_API_ERROR);
	return ret;
}

struct reflog_expiry_arg {
	struct reftable_stack *stack;
	struct reftable_log_record *records;
	struct object_id update_oid;
	const char *refname;
	size_t len;
};

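/*
 * Table writer callback for reflog expiry: writes the updated ref value if
 * one was requested, re-adds the existence marker in case no live log
 * records remain, and finally writes the rewritten log records themselves.
 */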
static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
{
	struct reflog_expiry_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	uint64_t live_records = 0;
	size_t i;
	int ret;

	for (i = 0; i < arg->len; i++)
		if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
			live_records++;

	reftable_writer_set_limits(writer, ts, ts);

	if (!is_null_oid(&arg->update_oid)) {
		struct reftable_ref_record ref = {0};
		struct object_id peeled;

		ref.refname = (char *)arg->refname;
		ref.update_index = ts;

		if (!peel_object(&arg->update_oid, &peeled)) {
			ref.value_type = REFTABLE_REF_VAL2;
			memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
			memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
		} else {
			ref.value_type = REFTABLE_REF_VAL1;
			memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
		}

		ret = reftable_writer_add_ref(writer, &ref);
		if (ret < 0)
			return ret;
	}

	/*
	 * When no live entries remain in the reflog we empty it completely,
	 * but write a placeholder entry so that the reflog is still reported
	 * to exist.
	 */
	if (!live_records) {
		struct reftable_log_record log = {
			.refname = (char *)arg->refname,
			.value_type = REFTABLE_LOG_UPDATE,
			.update_index = ts,
		};

		ret = reftable_writer_add_log(writer, &log);
		if (ret)
			return ret;
	}

	for (i = 0; i < arg->len; i++) {
		ret = reftable_writer_add_log(writer, &arg->records[i]);
		if (ret)
			return ret;
	}

	return 0;
}

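/*
 * Expire reflog entries of the given ref: collect its log records, ask the
 * policy callbacks which of them to prune, optionally rewrite the survivors
 * so that their old/new object ID chain stays intact, and commit the result
 * unless this is a dry run.
 */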
static int reftable_be_reflog_expire(struct ref_store *ref_store,
				     const char *refname,
				     unsigned int flags,
				     reflog_expiry_prepare_fn prepare_fn,
				     reflog_expiry_should_prune_fn should_prune_fn,
				     reflog_expiry_cleanup_fn cleanup_fn,
				     void *policy_cb_data)
{
	/*
	 * For log expiry, we write tombstones for every single reflog entry
	 * that is to be expired. This means that the entries are still
	 * retrievable by delving into the stack, and expiring entries
	 * paradoxically takes extra memory. This memory is only reclaimed when
	 * compacting the reftable stack.
	 *
	 * It would be better if the refs backend supported an API that sets a
	 * criterion for all refs, passing the criterion to pack_refs().
	 *
	 * On the plus side, because we do the expiration per ref, we can easily
	 * insert the reflog existence dummies.
	 */
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
	struct reftable_log_record *logs = NULL;
	struct reftable_log_record *rewritten = NULL;
	struct reftable_ref_record ref_record = {0};
	struct reftable_iterator it = {0};
	struct reftable_addition *add = NULL;
	struct reflog_expiry_arg arg = {0};
	struct object_id oid = {0};
	uint8_t *last_hash = NULL;
	size_t logs_nr = 0, logs_alloc = 0, i;
	int ret;

	if (refs->err < 0)
		return refs->err;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	ret = reftable_merged_table_seek_log(mt, &it, refname);
	if (ret < 0)
		goto done;

	ret = reftable_stack_new_addition(&add, stack);
	if (ret < 0)
		goto done;

	ret = reftable_stack_read_ref(stack, refname, &ref_record);
	if (ret < 0)
		goto done;
	if (reftable_ref_record_val1(&ref_record))
		oidread(&oid, reftable_ref_record_val1(&ref_record));
	prepare_fn(refname, &oid, policy_cb_data);

	while (1) {
		struct reftable_log_record log = {0};
		struct object_id old_oid, new_oid;

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			break;
		}

		oidread(&old_oid, log.value.update.old_hash);
		oidread(&new_oid, log.value.update.new_hash);

		/*
		 * Skip over the reflog existence marker. We will add it back
		 * in when there are no live reflog records.
		 */
		if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
			reftable_log_record_release(&log);
			continue;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	/*
	 * We need to rewrite all reflog entries according to the pruning
	 * callback function:
	 *
	 * - If a reflog entry shall be pruned we mark the record for
	 *   deletion.
	 *
	 * - Otherwise we may have to rewrite the chain of reflog entries so
	 *   that gaps created by just-deleted records get backfilled.
	 */
	CALLOC_ARRAY(rewritten, logs_nr);
	for (i = logs_nr; i--;) {
		struct reftable_log_record *dest = &rewritten[i];
		struct object_id old_oid, new_oid;

		*dest = logs[i];
		oidread(&old_oid, logs[i].value.update.old_hash);
		oidread(&new_oid, logs[i].value.update.new_hash);

		if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
				    (timestamp_t)logs[i].value.update.time,
				    logs[i].value.update.tz_offset,
				    logs[i].value.update.message,
				    policy_cb_data)) {
			dest->value_type = REFTABLE_LOG_DELETION;
		} else {
			if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
				dest->value.update.old_hash = last_hash;
			last_hash = logs[i].value.update.new_hash;
		}
	}

	if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
	    reftable_ref_record_val1(&ref_record))
		oidread(&arg.update_oid, last_hash);

	arg.records = rewritten;
	arg.len = logs_nr;
	arg.stack = stack;
	arg.refname = refname;

	ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
	if (ret < 0)
		goto done;

	/*
	 * Future improvement: we could skip writing records that were
	 * not changed.
	 */
	if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
		ret = reftable_addition_commit(add);

done:
	if (add)
		cleanup_fn(policy_cb_data);
	assert(ret != REFTABLE_API_ERROR);

	reftable_ref_record_release(&ref_record);
	reftable_iterator_destroy(&it);
	reftable_addition_destroy(add);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	free(rewritten);
	return ret;
}

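/*
 * The virtual method table wiring the reftable backend into the generic
 * ref-store machinery.
 */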
struct ref_storage_be refs_be_reftable = {
	.name = "reftable",
	.init = reftable_be_init,
	.init_db = reftable_be_init_db,
	.transaction_prepare = reftable_be_transaction_prepare,
	.transaction_finish = reftable_be_transaction_finish,
	.transaction_abort = reftable_be_transaction_abort,
	.initial_transaction_commit = reftable_be_initial_transaction_commit,

	.pack_refs = reftable_be_pack_refs,
	.create_symref = reftable_be_create_symref,
	.rename_ref = reftable_be_rename_ref,
	.copy_ref = reftable_be_copy_ref,

	.iterator_begin = reftable_be_iterator_begin,
	.read_raw_ref = reftable_be_read_raw_ref,
	.read_symbolic_ref = reftable_be_read_symbolic_ref,

	.reflog_iterator_begin = reftable_be_reflog_iterator_begin,
	.for_each_reflog_ent = reftable_be_for_each_reflog_ent,
	.for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
	.reflog_exists = reftable_be_reflog_exists,
	.create_reflog = reftable_be_create_reflog,
	.delete_reflog = reftable_be_delete_reflog,
	.reflog_expire = reftable_be_reflog_expire,
};