refs/reftable-backend.c
1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
6 #include "../hash.h"
7 #include "../hex.h"
8 #include "../iterator.h"
9 #include "../ident.h"
10 #include "../lockfile.h"
11 #include "../object.h"
12 #include "../path.h"
13 #include "../refs.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
19 #include "../setup.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
22
23 /*
24 * Used as a flag in ref_update::flags when the ref_update was via an
25 * update to HEAD.
26 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
28
29 struct reftable_ref_store {
30 struct ref_store base;
31
32 /*
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
35 */
36 struct reftable_stack *main_stack;
37 /*
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
40 */
41 struct reftable_stack *worktree_stack;
42 /*
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
45 */
46 struct strmap worktree_stacks;
47 struct reftable_write_options write_options;
48
49 unsigned int store_flags;
50 int err;
51 };
52
53 /*
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
58 */
59 static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
60 unsigned int required_flags,
61 const char *caller)
62 {
63 struct reftable_ref_store *refs;
64
65 if (ref_store->be != &refs_be_reftable)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store->be->name, caller);
68
69 refs = (struct reftable_ref_store *)ref_store;
70
71 if ((refs->store_flags & required_flags) != required_flags)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller, required_flags, refs->store_flags);
74
75 return refs;
76 }
77
78 /*
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
85 *
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
90 */
91 static struct reftable_stack *stack_for(struct reftable_ref_store *store,
92 const char *refname,
93 const char **rewritten_ref)
94 {
95 const char *wtname;
96 int wtname_len;
97
98 if (!refname)
99 return store->main_stack;
100
101 switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
102 case REF_WORKTREE_OTHER: {
103 static struct strbuf wtname_buf = STRBUF_INIT;
104 struct strbuf wt_dir = STRBUF_INIT;
105 struct reftable_stack *stack;
106
107 /*
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
112 */
113 strbuf_reset(&wtname_buf);
114 strbuf_add(&wtname_buf, wtname, wtname_len);
115
116 /*
117 * There is an edge case here: when a `worktrees/$worktree` ref refers
118 * to the current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
123 * both stacks.
124 */
125 stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
126 if (!stack) {
127 strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
128 store->base.repo->commondir, wtname_buf.buf);
129
130 store->err = reftable_new_stack(&stack, wt_dir.buf,
131 store->write_options);
132 assert(store->err != REFTABLE_API_ERROR);
133 strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
134 }
135
136 strbuf_release(&wt_dir);
137 return stack;
138 }
139 case REF_WORKTREE_CURRENT:
140 /*
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
143 */
144 if (!store->worktree_stack)
145 return store->main_stack;
146 return store->worktree_stack;
147 case REF_WORKTREE_MAIN:
148 case REF_WORKTREE_SHARED:
149 return store->main_stack;
150 default:
151 BUG("unhandled worktree reference type");
152 }
153 }
154
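/*
 * Decide whether a reflog entry should be written for the given ref,
 * based on the `core.logAllRefUpdates` setting and on whether a reflog
 * already exists for it.
 */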
155 static int should_write_log(struct ref_store *refs, const char *refname)
156 {
157 if (log_all_ref_updates == LOG_REFS_UNSET)
158 log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;
159
160 switch (log_all_ref_updates) {
161 case LOG_REFS_NONE:
162 return refs_reflog_exists(refs, refname);
163 case LOG_REFS_ALWAYS:
164 return 1;
165 case LOG_REFS_NORMAL:
166 if (should_autocreate_reflog(refname))
167 return 1;
168 return refs_reflog_exists(refs, refname);
169 default:
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
171 }
172 }
173
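/*
 * Release a log record without freeing its old/new hashes, which are
 * owned by the caller when we write log records.
 */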
174 static void clear_reftable_log_record(struct reftable_log_record *log)
175 {
176 switch (log->value_type) {
177 case REFTABLE_LOG_UPDATE:
178 /*
179 * When we write log records, the hashes are owned by the
180 * caller and thus shouldn't be free'd.
181 */
182 log->value.update.old_hash = NULL;
183 log->value.update.new_hash = NULL;
184 break;
185 case REFTABLE_LOG_DELETION:
186 break;
187 }
188 reftable_log_record_release(log);
189 }
190
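/*
 * Fill in the committer name, email, timestamp and timezone offset of
 * the given log record from the current committer identity.
 */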
191 static void fill_reftable_log_record(struct reftable_log_record *log)
192 {
193 const char *info = git_committer_info(0);
194 struct ident_split split = {0};
195 int sign = 1;
196
197 if (split_ident_line(&split, info, strlen(info)))
198 BUG("failed splitting committer info");
199
200 reftable_log_record_release(log);
201 log->value_type = REFTABLE_LOG_UPDATE;
202 log->value.update.name =
203 xstrndup(split.name_begin, split.name_end - split.name_begin);
204 log->value.update.email =
205 xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
206 log->value.update.time = atol(split.date_begin);
207 if (*split.tz_begin == '-') {
208 sign = -1;
209 split.tz_begin++;
210 }
211 if (*split.tz_begin == '+') {
212 sign = 1;
213 split.tz_begin++;
214 }
215
216 log->value.update.tz_offset = sign * atoi(split.tz_begin);
217 }
218
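/*
 * Read a single reference from the given stack without reloading it
 * first; the caller is responsible for the stack being up to date.
 * Symbolic refs are returned via `referent`, direct refs via `oid`.
 */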
219 static int read_ref_without_reload(struct reftable_stack *stack,
220 const char *refname,
221 struct object_id *oid,
222 struct strbuf *referent,
223 unsigned int *type)
224 {
225 struct reftable_ref_record ref = {0};
226 int ret;
227
228 ret = reftable_stack_read_ref(stack, refname, &ref);
229 if (ret)
230 goto done;
231
232 if (ref.value_type == REFTABLE_REF_SYMREF) {
233 strbuf_reset(referent);
234 strbuf_addstr(referent, ref.value.symref);
235 *type |= REF_ISSYMREF;
236 } else if (reftable_ref_record_val1(&ref)) {
237 oidread(oid, reftable_ref_record_val1(&ref));
238 } else {
239 /* We got a tombstone, which should not happen. */
240 BUG("unhandled reference value type %d", ref.value_type);
241 }
242
243 done:
244 assert(ret != REFTABLE_API_ERROR);
245 reftable_ref_record_release(&ref);
246 return ret;
247 }
248
249 static struct ref_store *reftable_be_init(struct repository *repo,
250 const char *gitdir,
251 unsigned int store_flags)
252 {
253 struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
254 struct strbuf path = STRBUF_INIT;
255 int is_worktree;
256 mode_t mask;
257
258 mask = umask(0);
259 umask(mask);
260
261 base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
262 strmap_init(&refs->worktree_stacks);
263 refs->store_flags = store_flags;
264 refs->write_options.block_size = 4096;
265 refs->write_options.hash_id = repo->hash_algo->format_id;
266 refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
267
268 /*
269 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
270 * This stack contains both the shared and the main worktree refs.
271 *
272 * Note that we don't try to resolve the path in case we have a
273 * worktree because `get_common_dir_noenv()` already does it for us.
274 */
275 is_worktree = get_common_dir_noenv(&path, gitdir);
276 if (!is_worktree) {
277 strbuf_reset(&path);
278 strbuf_realpath(&path, gitdir, 0);
279 }
280 strbuf_addstr(&path, "/reftable");
281 refs->err = reftable_new_stack(&refs->main_stack, path.buf,
282 refs->write_options);
283 if (refs->err)
284 goto done;
285
286 /*
287 * If we're in a worktree we also need to set up the worktree reftable
288 * stack that is contained in the per-worktree GIT_DIR.
289 *
290 * Ideally, we would also add the stack to our worktree stack map. But
291 * we have no way to figure out the worktree name here and thus can't
292 * do it efficiently.
293 */
294 if (is_worktree) {
295 strbuf_reset(&path);
296 strbuf_addf(&path, "%s/reftable", gitdir);
297
298 refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
299 refs->write_options);
300 if (refs->err)
301 goto done;
302 }
303
304 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);
305
306 done:
307 assert(refs->err != REFTABLE_API_ERROR);
308 strbuf_release(&path);
309 return &refs->base;
310 }
311
312 static int reftable_be_init_db(struct ref_store *ref_store,
313 int flags UNUSED,
314 struct strbuf *err UNUSED)
315 {
316 struct reftable_ref_store *refs =
317 reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
318 struct strbuf sb = STRBUF_INIT;
319
320 strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
321 safe_create_dir(sb.buf, 1);
322 strbuf_reset(&sb);
323
324 strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
325 write_file(sb.buf, "ref: refs/heads/.invalid");
326 adjust_shared_perm(sb.buf);
327 strbuf_reset(&sb);
328
329 strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
330 safe_create_dir(sb.buf, 1);
331 strbuf_reset(&sb);
332
333 strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
334 write_file(sb.buf, "this repository uses the reftable format");
335 adjust_shared_perm(sb.buf);
336
337 strbuf_release(&sb);
338 return 0;
339 }
340
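/*
 * Iterator over the ref records of a single reftable stack, exposed via
 * the generic ref_iterator interface.
 */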
341 struct reftable_ref_iterator {
342 struct ref_iterator base;
343 struct reftable_ref_store *refs;
344 struct reftable_iterator iter;
345 struct reftable_ref_record ref;
346 struct object_id oid;
347
348 const char *prefix;
349 unsigned int flags;
350 int err;
351 };
352
353 static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
354 {
355 struct reftable_ref_iterator *iter =
356 (struct reftable_ref_iterator *)ref_iterator;
357 struct reftable_ref_store *refs = iter->refs;
358
359 while (!iter->err) {
360 int flags = 0;
361
362 iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
363 if (iter->err)
364 break;
365
366 /*
367 * The files backend only lists references contained in
368 * "refs/". We emulate the same behaviour here and thus skip
369 * all references that don't start with this prefix.
370 */
371 if (!starts_with(iter->ref.refname, "refs/"))
372 continue;
373
374 if (iter->prefix &&
375 strncmp(iter->prefix, iter->ref.refname, strlen(iter->prefix))) {
376 iter->err = 1;
377 break;
378 }
379
380 if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
381 parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
382 REF_WORKTREE_CURRENT)
383 continue;
384
385 switch (iter->ref.value_type) {
386 case REFTABLE_REF_VAL1:
387 oidread(&iter->oid, iter->ref.value.val1);
388 break;
389 case REFTABLE_REF_VAL2:
390 oidread(&iter->oid, iter->ref.value.val2.value);
391 break;
392 case REFTABLE_REF_SYMREF:
393 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
394 RESOLVE_REF_READING, &iter->oid, &flags))
395 oidclr(&iter->oid);
396 break;
397 default:
398 BUG("unhandled reference value type %d", iter->ref.value_type);
399 }
400
401 if (is_null_oid(&iter->oid))
402 flags |= REF_ISBROKEN;
403
404 if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
405 if (!refname_is_safe(iter->ref.refname))
406 die(_("refname is dangerous: %s"), iter->ref.refname);
407 oidclr(&iter->oid);
408 flags |= REF_BAD_NAME | REF_ISBROKEN;
409 }
410
411 if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
412 flags & REF_ISSYMREF &&
413 flags & REF_ISBROKEN)
414 continue;
415
416 if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
417 !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
418 &iter->oid, flags))
419 continue;
420
421 iter->base.refname = iter->ref.refname;
422 iter->base.oid = &iter->oid;
423 iter->base.flags = flags;
424
425 break;
426 }
427
428 if (iter->err > 0) {
429 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
430 return ITER_ERROR;
431 return ITER_DONE;
432 }
433
434 if (iter->err < 0) {
435 ref_iterator_abort(ref_iterator);
436 return ITER_ERROR;
437 }
438
439 return ITER_OK;
440 }
441
442 static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
443 struct object_id *peeled)
444 {
445 struct reftable_ref_iterator *iter =
446 (struct reftable_ref_iterator *)ref_iterator;
447
448 if (iter->ref.value_type == REFTABLE_REF_VAL2) {
449 oidread(peeled, iter->ref.value.val2.target_value);
450 return 0;
451 }
452
453 return -1;
454 }
455
456 static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
457 {
458 struct reftable_ref_iterator *iter =
459 (struct reftable_ref_iterator *)ref_iterator;
460 reftable_ref_record_release(&iter->ref);
461 reftable_iterator_destroy(&iter->iter);
462 free(iter);
463 return ITER_DONE;
464 }
465
466 static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
467 .advance = reftable_ref_iterator_advance,
468 .peel = reftable_ref_iterator_peel,
469 .abort = reftable_ref_iterator_abort
470 };
471
472 static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
473 struct reftable_stack *stack,
474 const char *prefix,
475 int flags)
476 {
477 struct reftable_merged_table *merged_table;
478 struct reftable_ref_iterator *iter;
479 int ret;
480
481 iter = xcalloc(1, sizeof(*iter));
482 base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable, 1);
483 iter->prefix = prefix;
484 iter->base.oid = &iter->oid;
485 iter->flags = flags;
486 iter->refs = refs;
487
488 ret = refs->err;
489 if (ret)
490 goto done;
491
492 ret = reftable_stack_reload(stack);
493 if (ret)
494 goto done;
495
496 merged_table = reftable_stack_merged_table(stack);
497
498 ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
499 if (ret)
500 goto done;
501
502 done:
503 iter->err = ret;
504 return iter;
505 }
506
507 static enum iterator_selection iterator_select(struct ref_iterator *iter_worktree,
508 struct ref_iterator *iter_common,
509 void *cb_data UNUSED)
510 {
511 if (iter_worktree && !iter_common) {
512 /*
513 * Return the worktree ref if there are no more common refs.
514 */
515 return ITER_SELECT_0;
516 } else if (iter_common) {
517 /*
518 * In case we have pending worktree and common refs we need to
519 * yield them based on their lexicographical order. Worktree
520 * refs that have the same name as common refs shadow the
521 * latter.
522 */
523 if (iter_worktree) {
524 int cmp = strcmp(iter_worktree->refname,
525 iter_common->refname);
526 if (cmp < 0)
527 return ITER_SELECT_0;
528 else if (!cmp)
529 return ITER_SELECT_0_SKIP_1;
530 }
531
532 /*
533 * We now know that the lexicographically-next ref is a common
534 * ref. When the common ref is a shared one we return it.
535 */
536 if (parse_worktree_ref(iter_common->refname, NULL, NULL,
537 NULL) == REF_WORKTREE_SHARED)
538 return ITER_SELECT_1;
539
540 /*
541 * Otherwise, if the common ref is a per-worktree ref we skip
542 * it because it would belong to the main worktree, not ours.
543 */
544 return ITER_SKIP_1;
545 } else {
546 return ITER_DONE;
547 }
548 }
549
550 static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
551 const char *prefix,
552 const char **exclude_patterns,
553 unsigned int flags)
554 {
555 struct reftable_ref_iterator *main_iter, *worktree_iter;
556 struct reftable_ref_store *refs;
557 unsigned int required_flags = REF_STORE_READ;
558
559 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
560 required_flags |= REF_STORE_ODB;
561 refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");
562
563 main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);
564
565 /*
566 * The worktree stack is only set when we're in an actual worktree
567 * right now. If we aren't, then we only return the common reftable
568 * iterator.
569 */
570 if (!refs->worktree_stack)
571 return &main_iter->base;
572
573 /*
574 * Otherwise we merge both the common and the per-worktree refs into a
575 * single iterator.
576 */
577 worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
578 return merge_ref_iterator_begin(1, &worktree_iter->base, &main_iter->base,
579 iterator_select, NULL);
580 }
581
582 static int reftable_be_read_raw_ref(struct ref_store *ref_store,
583 const char *refname,
584 struct object_id *oid,
585 struct strbuf *referent,
586 unsigned int *type,
587 int *failure_errno)
588 {
589 struct reftable_ref_store *refs =
590 reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
591 struct reftable_stack *stack = stack_for(refs, refname, &refname);
592 int ret;
593
594 if (refs->err < 0)
595 return refs->err;
596
597 ret = reftable_stack_reload(stack);
598 if (ret)
599 return ret;
600
601 ret = read_ref_without_reload(stack, refname, oid, referent, type);
602 if (ret < 0)
603 return ret;
604 if (ret > 0) {
605 *failure_errno = ENOENT;
606 return -1;
607 }
608
609 return 0;
610 }
611
612 static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
613 const char *refname,
614 struct strbuf *referent)
615 {
616 struct reftable_ref_store *refs =
617 reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
618 struct reftable_stack *stack = stack_for(refs, refname, &refname);
619 struct reftable_ref_record ref = {0};
620 int ret;
621
622 ret = reftable_stack_reload(stack);
623 if (ret)
624 return ret;
625
626 ret = reftable_stack_read_ref(stack, refname, &ref);
627 if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
628 strbuf_addstr(referent, ref.value.symref);
629 else
630 ret = -1;
631
632 reftable_ref_record_release(&ref);
633 return ret;
634 }
635
636 /*
637 * Return the refname under which the update was originally requested.
638 */
639 static const char *original_update_refname(struct ref_update *update)
640 {
641 while (update->parent_update)
642 update = update->parent_update;
643 return update->refname;
644 }
645
646 struct reftable_transaction_update {
647 struct ref_update *update;
648 struct object_id current_oid;
649 };
650
651 struct write_transaction_table_arg {
652 struct reftable_ref_store *refs;
653 struct reftable_stack *stack;
654 struct reftable_addition *addition;
655 struct reftable_transaction_update *updates;
656 size_t updates_nr;
657 size_t updates_alloc;
658 size_t updates_expected;
659 };
660
661 struct reftable_transaction_data {
662 struct write_transaction_table_arg *args;
663 size_t args_nr, args_alloc;
664 };
665
666 static void free_transaction_data(struct reftable_transaction_data *tx_data)
667 {
668 if (!tx_data)
669 return;
670 for (size_t i = 0; i < tx_data->args_nr; i++) {
671 reftable_addition_destroy(tx_data->args[i].addition);
672 free(tx_data->args[i].updates);
673 }
674 free(tx_data->args);
675 free(tx_data);
676 }
677
678 /*
679 * Prepare transaction update for the given reference update. This will cause
680 * us to lock the corresponding reftable stack against concurrent modification.
681 */
682 static int prepare_transaction_update(struct write_transaction_table_arg **out,
683 struct reftable_ref_store *refs,
684 struct reftable_transaction_data *tx_data,
685 struct ref_update *update,
686 struct strbuf *err)
687 {
688 struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
689 struct write_transaction_table_arg *arg = NULL;
690 size_t i;
691 int ret;
692
693 /*
694 * Search for a preexisting stack update. If there is one then we add
695 * the update to it, otherwise we set up a new stack update.
696 */
697 for (i = 0; !arg && i < tx_data->args_nr; i++)
698 if (tx_data->args[i].stack == stack)
699 arg = &tx_data->args[i];
700
701 if (!arg) {
702 struct reftable_addition *addition;
703
704 ret = reftable_stack_reload(stack);
705 if (ret)
706 return ret;
707
708 ret = reftable_stack_new_addition(&addition, stack);
709 if (ret) {
710 if (ret == REFTABLE_LOCK_ERROR)
711 strbuf_addstr(err, "cannot lock references");
712 return ret;
713 }
714
715 ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
716 tx_data->args_alloc);
717 arg = &tx_data->args[tx_data->args_nr++];
718 arg->refs = refs;
719 arg->stack = stack;
720 arg->addition = addition;
721 arg->updates = NULL;
722 arg->updates_nr = 0;
723 arg->updates_alloc = 0;
724 arg->updates_expected = 0;
725 }
726
727 arg->updates_expected++;
728
729 if (out)
730 *out = arg;
731
732 return 0;
733 }
734
735 /*
736 * Queue a reference update for the correct stack. We potentially need to
737 * handle multiple stack updates in a single transaction when it spans
738 * multiple worktrees.
739 */
740 static int queue_transaction_update(struct reftable_ref_store *refs,
741 struct reftable_transaction_data *tx_data,
742 struct ref_update *update,
743 struct object_id *current_oid,
744 struct strbuf *err)
745 {
746 struct write_transaction_table_arg *arg = NULL;
747 int ret;
748
749 if (update->backend_data)
750 BUG("reference update queued more than once");
751
752 ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
753 if (ret < 0)
754 return ret;
755
756 ALLOC_GROW(arg->updates, arg->updates_nr + 1,
757 arg->updates_alloc);
758 arg->updates[arg->updates_nr].update = update;
759 oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
760 update->backend_data = &arg->updates[arg->updates_nr++];
761
762 return 0;
763 }
764
765 static int reftable_be_transaction_prepare(struct ref_store *ref_store,
766 struct ref_transaction *transaction,
767 struct strbuf *err)
768 {
769 struct reftable_ref_store *refs =
770 reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
771 struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
772 struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
773 struct reftable_transaction_data *tx_data = NULL;
774 struct object_id head_oid;
775 unsigned int head_type = 0;
776 size_t i;
777 int ret;
778
779 ret = refs->err;
780 if (ret < 0)
781 goto done;
782
783 tx_data = xcalloc(1, sizeof(*tx_data));
784
785 /*
786 * Preprocess all updates. First, we check that there are no duplicate
787 * reference updates in this transaction. Second, we lock all stacks
788 * that will be modified during the transaction.
789 */
790 for (i = 0; i < transaction->nr; i++) {
791 ret = prepare_transaction_update(NULL, refs, tx_data,
792 transaction->updates[i], err);
793 if (ret)
794 goto done;
795
796 string_list_append(&affected_refnames,
797 transaction->updates[i]->refname);
798 }
799
800 /*
801 * Now that we have counted updates per stack we can preallocate their
802 * arrays. This avoids having to reallocate many times.
803 */
804 for (i = 0; i < tx_data->args_nr; i++) {
805 CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
806 tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
807 }
808
809 /*
810 * Fail if a refname appears more than once in the transaction.
811 * This code is taken from the files backend and is a good candidate to
812 * be moved into the generic layer.
813 */
814 string_list_sort(&affected_refnames);
815 if (ref_update_reject_duplicates(&affected_refnames, err)) {
816 ret = TRANSACTION_GENERIC_ERROR;
817 goto done;
818 }
819
820 ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
821 &head_referent, &head_type);
822 if (ret < 0)
823 goto done;
824
825 for (i = 0; i < transaction->nr; i++) {
826 struct ref_update *u = transaction->updates[i];
827 struct object_id current_oid = {0};
828 struct reftable_stack *stack;
829 const char *rewritten_ref;
830
831 stack = stack_for(refs, u->refname, &rewritten_ref);
832
833 /* Verify that the new object ID is valid. */
834 if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
835 !(u->flags & REF_SKIP_OID_VERIFICATION) &&
836 !(u->flags & REF_LOG_ONLY)) {
837 struct object *o = parse_object(refs->base.repo, &u->new_oid);
838 if (!o) {
839 strbuf_addf(err,
840 _("trying to write ref '%s' with nonexistent object %s"),
841 u->refname, oid_to_hex(&u->new_oid));
842 ret = -1;
843 goto done;
844 }
845
846 if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
847 strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
848 oid_to_hex(&u->new_oid), u->refname);
849 ret = -1;
850 goto done;
851 }
852 }
853
854 /*
855 * When we update the reference that HEAD points to we enqueue
856 * a second log-only update for HEAD so that its reflog is
857 * updated accordingly.
858 */
859 if (head_type == REF_ISSYMREF &&
860 !(u->flags & REF_LOG_ONLY) &&
861 !(u->flags & REF_UPDATE_VIA_HEAD) &&
862 !strcmp(rewritten_ref, head_referent.buf)) {
863 struct ref_update *new_update;
864
865 /*
866 * First make sure that HEAD is not already in the
867 * transaction. This check is O(lg N) in the transaction
868 * size, but it happens at most once per transaction.
869 */
870 if (string_list_has_string(&affected_refnames, "HEAD")) {
871 /* An entry already existed */
872 strbuf_addf(err,
873 _("multiple updates for 'HEAD' (including one "
874 "via its referent '%s') are not allowed"),
875 u->refname);
876 ret = TRANSACTION_NAME_CONFLICT;
877 goto done;
878 }
879
880 new_update = ref_transaction_add_update(
881 transaction, "HEAD",
882 u->flags | REF_LOG_ONLY | REF_NO_DEREF,
883 &u->new_oid, &u->old_oid, u->msg);
884 string_list_insert(&affected_refnames, new_update->refname);
885 }
886
887 ret = read_ref_without_reload(stack, rewritten_ref,
888 &current_oid, &referent, &u->type);
889 if (ret < 0)
890 goto done;
891 if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
892 /*
893 * The reference does not exist, and we either have no
894 * old object ID or expect the reference to not exist.
895 * We can thus skip below safety checks as well as the
896 * symref splitting. But we do want to verify that
897 * there is no conflicting reference here so that we
898 * can output a proper error message instead of failing
899 * at a later point.
900 */
901 ret = refs_verify_refname_available(ref_store, u->refname,
902 &affected_refnames, NULL, err);
903 if (ret < 0)
904 goto done;
905
906 /*
907 * There is no need to write the reference deletion
908 * when the reference in question doesn't exist.
909 */
910 if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
911 ret = queue_transaction_update(refs, tx_data, u,
912 &current_oid, err);
913 if (ret)
914 goto done;
915 }
916
917 continue;
918 }
919 if (ret > 0) {
920 /* The reference does not exist, but we expected it to. */
921 strbuf_addf(err, _("cannot lock ref '%s': "
922 "unable to resolve reference '%s'"),
923 original_update_refname(u), u->refname);
924 ret = -1;
925 goto done;
926 }
927
928 if (u->type & REF_ISSYMREF) {
929 /*
930 * The reftable stack is locked at this point already,
931 * so it is safe to call `refs_resolve_ref_unsafe()`
932 * here without causing races.
933 */
934 const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
935 &current_oid, NULL);
936
937 if (u->flags & REF_NO_DEREF) {
938 if (u->flags & REF_HAVE_OLD && !resolved) {
939 strbuf_addf(err, _("cannot lock ref '%s': "
940 "error reading reference"), u->refname);
941 ret = -1;
942 goto done;
943 }
944 } else {
945 struct ref_update *new_update;
946 int new_flags;
947
948 new_flags = u->flags;
949 if (!strcmp(rewritten_ref, "HEAD"))
950 new_flags |= REF_UPDATE_VIA_HEAD;
951
952 /*
953 * If we are updating a symref (eg. HEAD), we should also
954 * update the branch that the symref points to.
955 *
956 * This is generic functionality, and would be better
957 * done in refs.c, but the current implementation is
958 * intertwined with the locking in files-backend.c.
959 */
960 new_update = ref_transaction_add_update(
961 transaction, referent.buf, new_flags,
962 &u->new_oid, &u->old_oid, u->msg);
963 new_update->parent_update = u;
964
965 /*
966 * Change the symbolic ref update to log only. Also, it
967 * doesn't need to check its old OID value, as that will be
968 * done when new_update is processed.
969 */
970 u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
971 u->flags &= ~REF_HAVE_OLD;
972
973 if (string_list_has_string(&affected_refnames, new_update->refname)) {
974 strbuf_addf(err,
975 _("multiple updates for '%s' (including one "
976 "via symref '%s') are not allowed"),
977 referent.buf, u->refname);
978 ret = TRANSACTION_NAME_CONFLICT;
979 goto done;
980 }
981 string_list_insert(&affected_refnames, new_update->refname);
982 }
983 }
984
985 /*
986 * Verify that the old object matches our expectations. Note
987 * that the error messages here do not make a lot of sense in
988 * the context of the reftable backend as we never lock
989 * individual refs. But the error messages match what the files
990 * backend returns, which keeps our tests happy.
991 */
992 if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
993 if (is_null_oid(&u->old_oid))
994 strbuf_addf(err, _("cannot lock ref '%s': "
995 "reference already exists"),
996 original_update_refname(u));
997 else if (is_null_oid(&current_oid))
998 strbuf_addf(err, _("cannot lock ref '%s': "
999 "reference is missing but expected %s"),
1000 original_update_refname(u),
1001 oid_to_hex(&u->old_oid));
1002 else
1003 strbuf_addf(err, _("cannot lock ref '%s': "
1004 "is at %s but expected %s"),
1005 original_update_refname(u),
1006 oid_to_hex(&current_oid),
1007 oid_to_hex(&u->old_oid));
1008 ret = -1;
1009 goto done;
1010 }
1011
1012 /*
1013 * If all of the following conditions are true:
1014 *
1015 * - We're not about to write a symref.
1016 * - We're not about to write a log-only entry.
1017 * - Old and new object ID are the same.
1018 *
1019 * Then we're essentially doing a no-op update that can be
1020 * skipped. This is not only for the sake of efficiency, but
1021 * also skips writing unneeded reflog entries.
1022 */
1023 if ((u->type & REF_ISSYMREF) ||
1024 (u->flags & REF_LOG_ONLY) ||
1025 (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
1026 ret = queue_transaction_update(refs, tx_data, u,
1027 &current_oid, err);
1028 if (ret)
1029 goto done;
1030 }
1031 }
1032
1033 transaction->backend_data = tx_data;
1034 transaction->state = REF_TRANSACTION_PREPARED;
1035
1036 done:
1037 assert(ret != REFTABLE_API_ERROR);
1038 if (ret < 0) {
1039 free_transaction_data(tx_data);
1040 transaction->state = REF_TRANSACTION_CLOSED;
1041 if (!err->len)
1042 strbuf_addf(err, _("reftable: transaction prepare: %s"),
1043 reftable_error_str(ret));
1044 }
1045 string_list_clear(&affected_refnames, 0);
1046 strbuf_release(&referent);
1047 strbuf_release(&head_referent);
1048
1049 return ret;
1050 }
1051
1052 static int reftable_be_transaction_abort(struct ref_store *ref_store,
1053 struct ref_transaction *transaction,
1054 struct strbuf *err)
1055 {
1056 struct reftable_transaction_data *tx_data = transaction->backend_data;
1057 free_transaction_data(tx_data);
1058 transaction->state = REF_TRANSACTION_CLOSED;
1059 return 0;
1060 }
1061
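/*
 * Sort queued updates by refname so that ref records are added to the
 * writer in sorted order.
 */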
1062 static int transaction_update_cmp(const void *a, const void *b)
1063 {
1064 return strcmp(((struct reftable_transaction_update *)a)->update->refname,
1065 ((struct reftable_transaction_update *)b)->update->refname);
1066 }
1067
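/*
 * Callback for `reftable_addition_add()` that writes all queued ref and
 * log records of a single stack into a new table.
 */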
1068 static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
1069 {
1070 struct write_transaction_table_arg *arg = cb_data;
1071 struct reftable_merged_table *mt =
1072 reftable_stack_merged_table(arg->stack);
1073 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1074 struct reftable_log_record *logs = NULL;
1075 size_t logs_nr = 0, logs_alloc = 0, i;
1076 int ret = 0;
1077
1078 QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);
1079
1080 reftable_writer_set_limits(writer, ts, ts);
1081
1082 for (i = 0; i < arg->updates_nr; i++) {
1083 struct reftable_transaction_update *tx_update = &arg->updates[i];
1084 struct ref_update *u = tx_update->update;
1085
1086 /*
1087 * Write a reflog entry when updating a ref to point to
1088 * something new in any of the following cases:
1089 *
1090 * - The reference is about to be deleted. We always want to
1091 * delete the reflog in that case.
1092 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1093 * the reflog entry.
1094 * - `core.logAllRefUpdates` tells us to create the reflog for
1095 * the given ref.
1096 */
1097 if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
1098 struct reftable_log_record log = {0};
1099 struct reftable_iterator it = {0};
1100
1101 /*
1102 * When deleting refs we also delete all reflog entries
1103 * with them. While it is not strictly required to
1104 * delete reflogs together with their refs, this
1105 * matches the behaviour of the files backend.
1106 *
1107 * Unfortunately, we have no better way than to delete
1108 * all reflog entries one by one.
1109 */
1110 ret = reftable_merged_table_seek_log(mt, &it, u->refname);
1111 while (ret == 0) {
1112 struct reftable_log_record *tombstone;
1113
1114 ret = reftable_iterator_next_log(&it, &log);
1115 if (ret < 0)
1116 break;
1117 if (ret > 0 || strcmp(log.refname, u->refname)) {
1118 ret = 0;
1119 break;
1120 }
1121
1122 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1123 tombstone = &logs[logs_nr++];
1124 tombstone->refname = xstrdup(u->refname);
1125 tombstone->value_type = REFTABLE_LOG_DELETION;
1126 tombstone->update_index = log.update_index;
1127 }
1128
1129 reftable_log_record_release(&log);
1130 reftable_iterator_destroy(&it);
1131
1132 if (ret)
1133 goto done;
1134 } else if (u->flags & REF_HAVE_NEW &&
1135 (u->flags & REF_FORCE_CREATE_REFLOG ||
1136 should_write_log(&arg->refs->base, u->refname))) {
1137 struct reftable_log_record *log;
1138
1139 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1140 log = &logs[logs_nr++];
1141 memset(log, 0, sizeof(*log));
1142
1143 fill_reftable_log_record(log);
1144 log->update_index = ts;
1145 log->refname = xstrdup(u->refname);
1146 log->value.update.new_hash = u->new_oid.hash;
1147 log->value.update.old_hash = tx_update->current_oid.hash;
1148 log->value.update.message =
1149 xstrndup(u->msg, arg->refs->write_options.block_size / 2);
1150 }
1151
1152 if (u->flags & REF_LOG_ONLY)
1153 continue;
1154
1155 if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
1156 struct reftable_ref_record ref = {
1157 .refname = (char *)u->refname,
1158 .update_index = ts,
1159 .value_type = REFTABLE_REF_DELETION,
1160 };
1161
1162 ret = reftable_writer_add_ref(writer, &ref);
1163 if (ret < 0)
1164 goto done;
1165 } else if (u->flags & REF_HAVE_NEW) {
1166 struct reftable_ref_record ref = {0};
1167 struct object_id peeled;
1168 int peel_error;
1169
1170 ref.refname = (char *)u->refname;
1171 ref.update_index = ts;
1172
1173 peel_error = peel_object(&u->new_oid, &peeled);
1174 if (!peel_error) {
1175 ref.value_type = REFTABLE_REF_VAL2;
1176 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
1177 memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
1178 } else if (!is_null_oid(&u->new_oid)) {
1179 ref.value_type = REFTABLE_REF_VAL1;
1180 memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
1181 }
1182
1183 ret = reftable_writer_add_ref(writer, &ref);
1184 if (ret < 0)
1185 goto done;
1186 }
1187 }
1188
1189 /*
1190 * Logs are written at the end so that we do not have intermixed ref
1191 * and log blocks.
1192 */
1193 if (logs) {
1194 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1195 if (ret < 0)
1196 goto done;
1197 }
1198
1199 done:
1200 assert(ret != REFTABLE_API_ERROR);
1201 for (i = 0; i < logs_nr; i++)
1202 clear_reftable_log_record(&logs[i]);
1203 free(logs);
1204 return ret;
1205 }
1206
1207 static int reftable_be_transaction_finish(struct ref_store *ref_store,
1208 struct ref_transaction *transaction,
1209 struct strbuf *err)
1210 {
1211 struct reftable_transaction_data *tx_data = transaction->backend_data;
1212 int ret = 0;
1213
1214 for (size_t i = 0; i < tx_data->args_nr; i++) {
1215 ret = reftable_addition_add(tx_data->args[i].addition,
1216 write_transaction_table, &tx_data->args[i]);
1217 if (ret < 0)
1218 goto done;
1219
1220 ret = reftable_addition_commit(tx_data->args[i].addition);
1221 if (ret < 0)
1222 goto done;
1223 }
1224
1225 done:
1226 assert(ret != REFTABLE_API_ERROR);
1227 free_transaction_data(tx_data);
1228 transaction->state = REF_TRANSACTION_CLOSED;
1229
1230 if (ret) {
1231 strbuf_addf(err, _("reftable: transaction failure: %s"),
1232 reftable_error_str(ret));
1233 return -1;
1234 }
1235 return ret;
1236 }
1237
1238 static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
1239 struct ref_transaction *transaction,
1240 struct strbuf *err)
1241 {
1242 return ref_transaction_commit(transaction, err);
1243 }
1244
1245 static int reftable_be_pack_refs(struct ref_store *ref_store,
1246 struct pack_refs_opts *opts)
1247 {
1248 struct reftable_ref_store *refs =
1249 reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
1250 struct reftable_stack *stack;
1251 int ret;
1252
1253 if (refs->err)
1254 return refs->err;
1255
1256 stack = refs->worktree_stack;
1257 if (!stack)
1258 stack = refs->main_stack;
1259
1260 ret = reftable_stack_compact_all(stack, NULL);
1261 if (ret)
1262 goto out;
1263 ret = reftable_stack_clean(stack);
1264 if (ret)
1265 goto out;
1266
1267 out:
1268 return ret;
1269 }
1270
1271 struct write_create_symref_arg {
1272 struct reftable_ref_store *refs;
1273 struct reftable_stack *stack;
1274 const char *refname;
1275 const char *target;
1276 const char *logmsg;
1277 };
1278
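/*
 * Callback that writes the symref record and, if reflogs are enabled, a
 * reflog entry for the newly created symbolic reference.
 */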
1279 static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
1280 {
1281 struct write_create_symref_arg *create = cb_data;
1282 uint64_t ts = reftable_stack_next_update_index(create->stack);
1283 struct reftable_ref_record ref = {
1284 .refname = (char *)create->refname,
1285 .value_type = REFTABLE_REF_SYMREF,
1286 .value.symref = (char *)create->target,
1287 .update_index = ts,
1288 };
1289 struct reftable_log_record log = {0};
1290 struct object_id new_oid;
1291 struct object_id old_oid;
1292 int ret;
1293
1294 reftable_writer_set_limits(writer, ts, ts);
1295
1296 ret = reftable_writer_add_ref(writer, &ref);
1297 if (ret)
1298 return ret;
1299
1300 /*
1301 * Note that it is important to try and resolve the reference before we
1302 * write the log entry. This is because `should_write_log()` will munge
1303 * `core.logAllRefUpdates`, which is undesirable when we create a new
1304 * repository because it would be written into the config. As HEAD will
1305 * not resolve for new repositories this ordering will ensure that this
1306 * never happens.
1307 */
1308 if (!create->logmsg ||
1309 !refs_resolve_ref_unsafe(&create->refs->base, create->target,
1310 RESOLVE_REF_READING, &new_oid, NULL) ||
1311 !should_write_log(&create->refs->base, create->refname))
1312 return 0;
1313
1314 fill_reftable_log_record(&log);
1315 log.refname = xstrdup(create->refname);
1316 log.update_index = ts;
1317 log.value.update.message = xstrndup(create->logmsg,
1318 create->refs->write_options.block_size / 2);
1319 log.value.update.new_hash = new_oid.hash;
1320 if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
1321 RESOLVE_REF_READING, &old_oid, NULL))
1322 log.value.update.old_hash = old_oid.hash;
1323
1324 ret = reftable_writer_add_log(writer, &log);
1325 clear_reftable_log_record(&log);
1326 return ret;
1327 }
1328
1329 static int reftable_be_create_symref(struct ref_store *ref_store,
1330 const char *refname,
1331 const char *target,
1332 const char *logmsg)
1333 {
1334 struct reftable_ref_store *refs =
1335 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
1336 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1337 struct write_create_symref_arg arg = {
1338 .refs = refs,
1339 .stack = stack,
1340 .refname = refname,
1341 .target = target,
1342 .logmsg = logmsg,
1343 };
1344 int ret;
1345
1346 ret = refs->err;
1347 if (ret < 0)
1348 goto done;
1349
1350 ret = reftable_stack_reload(stack);
1351 if (ret)
1352 goto done;
1353
1354 ret = reftable_stack_add(stack, &write_create_symref_table, &arg);
1355
1356 done:
1357 assert(ret != REFTABLE_API_ERROR);
1358 if (ret)
1359 error("unable to write symref for %s: %s", refname,
1360 reftable_error_str(ret));
1361 return ret;
1362 }
1363
1364 struct write_copy_arg {
1365 struct reftable_ref_store *refs;
1366 struct reftable_stack *stack;
1367 const char *oldname;
1368 const char *newname;
1369 const char *logmsg;
1370 int delete_old;
1371 };
1372
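/*
 * Callback that copies a reference and its reflog entries to a new name
 * and, when `delete_old` is set, also deletes the old reference and its
 * reflog as part of a rename.
 */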
1373 static int write_copy_table(struct reftable_writer *writer, void *cb_data)
1374 {
1375 struct write_copy_arg *arg = cb_data;
1376 uint64_t deletion_ts, creation_ts;
1377 struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
1378 struct reftable_ref_record old_ref = {0}, refs[2] = {0};
1379 struct reftable_log_record old_log = {0}, *logs = NULL;
1380 struct reftable_iterator it = {0};
1381 struct string_list skip = STRING_LIST_INIT_NODUP;
1382 struct strbuf errbuf = STRBUF_INIT;
1383 size_t logs_nr = 0, logs_alloc = 0, i;
1384 int ret;
1385
1386 if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
1387 ret = error(_("refname %s not found"), arg->oldname);
1388 goto done;
1389 }
1390 if (old_ref.value_type == REFTABLE_REF_SYMREF) {
1391 ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
1392 arg->oldname);
1393 goto done;
1394 }
1395
1396 /*
1397 * There's nothing to do in case the old and new name are the same, so
1398 * we exit early in that case.
1399 */
1400 if (!strcmp(arg->oldname, arg->newname)) {
1401 ret = 0;
1402 goto done;
1403 }
1404
1405 /*
1406 * Verify that the new refname is available.
1407 */
1408 string_list_insert(&skip, arg->oldname);
1409 ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
1410 NULL, &skip, &errbuf);
1411 if (ret < 0) {
1412 error("%s", errbuf.buf);
1413 goto done;
1414 }
1415
1416 /*
1417 * When deleting the old reference we have to use two update indices:
1418 * once to delete the old ref and its reflog, and once to create the
1419 * new ref and its reflog. They need to be staged with two separate
1420 * indices because the new reflog needs to encode both the deletion of
1421 * the old branch and the creation of the new branch, and we cannot do
1422 * two changes to a reflog in a single update.
1423 */
1424 deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
1425 if (arg->delete_old)
1426 creation_ts++;
1427 reftable_writer_set_limits(writer, deletion_ts, creation_ts);
1428
1429 /*
1430 * Add the new reference. If this is a rename then we also delete the
1431 * old reference.
1432 */
1433 refs[0] = old_ref;
1434 refs[0].refname = (char *)arg->newname;
1435 refs[0].update_index = creation_ts;
1436 if (arg->delete_old) {
1437 refs[1].refname = (char *)arg->oldname;
1438 refs[1].value_type = REFTABLE_REF_DELETION;
1439 refs[1].update_index = deletion_ts;
1440 }
1441 ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
1442 if (ret < 0)
1443 goto done;
1444
1445 /*
1446 * When deleting the old branch we need to create a reflog entry on the
1447 * new branch name that indicates that the old branch has been deleted
1448 * and then recreated. This is a tad weird, but matches what the files
1449 * backend does.
1450 */
1451 if (arg->delete_old) {
1452 struct strbuf head_referent = STRBUF_INIT;
1453 struct object_id head_oid;
1454 int append_head_reflog;
1455 unsigned head_type = 0;
1456
1457 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1458 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1459 fill_reftable_log_record(&logs[logs_nr]);
1460 logs[logs_nr].refname = (char *)arg->newname;
1461 logs[logs_nr].update_index = deletion_ts;
1462 logs[logs_nr].value.update.message =
1463 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1464 logs[logs_nr].value.update.old_hash = old_ref.value.val1;
1465 logs_nr++;
1466
1467 ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
1468 if (ret < 0)
1469 goto done;
1470 append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
1471 strbuf_release(&head_referent);
1472
1473 /*
1474 * The files backend uses `refs_delete_ref()` to delete the old
1475 * branch name, which will append a reflog entry for HEAD in
1476 * case it points to the old branch.
1477 */
1478 if (append_head_reflog) {
1479 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1480 logs[logs_nr] = logs[logs_nr - 1];
1481 logs[logs_nr].refname = "HEAD";
1482 logs_nr++;
1483 }
1484 }
1485
1486 /*
1487 * Create the reflog entry for the newly created branch.
1488 */
1489 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1490 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1491 fill_reftable_log_record(&logs[logs_nr]);
1492 logs[logs_nr].refname = (char *)arg->newname;
1493 logs[logs_nr].update_index = creation_ts;
1494 logs[logs_nr].value.update.message =
1495 xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
1496 logs[logs_nr].value.update.new_hash = old_ref.value.val1;
1497 logs_nr++;
1498
1499 /*
1500 * In addition to writing the reflog entry for the new branch, we also
1501 * copy over all log entries from the old reflog. Last but not least,
1502 * when renaming we also have to delete all the old reflog entries.
1503 */
1504 ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
1505 if (ret < 0)
1506 goto done;
1507
1508 while (1) {
1509 ret = reftable_iterator_next_log(&it, &old_log);
1510 if (ret < 0)
1511 goto done;
1512 if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
1513 ret = 0;
1514 break;
1515 }
1516
1517 free(old_log.refname);
1518
1519 /*
1520 * Copy over the old reflog entry with the new refname.
1521 */
1522 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1523 logs[logs_nr] = old_log;
1524 logs[logs_nr].refname = (char *)arg->newname;
1525 logs_nr++;
1526
1527 /*
1528 * Delete the old reflog entry in case we are renaming.
1529 */
1530 if (arg->delete_old) {
1531 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1532 memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
1533 logs[logs_nr].refname = (char *)arg->oldname;
1534 logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
1535 logs[logs_nr].update_index = old_log.update_index;
1536 logs_nr++;
1537 }
1538
1539 /*
1540 * Transfer ownership of the log record we're iterating over to
1541 * the array of log records. Otherwise, the pointers would get
1542 * free'd or reallocated by the iterator.
1543 */
1544 memset(&old_log, 0, sizeof(old_log));
1545 }
1546
1547 ret = reftable_writer_add_logs(writer, logs, logs_nr);
1548 if (ret < 0)
1549 goto done;
1550
1551 done:
1552 assert(ret != REFTABLE_API_ERROR);
1553 reftable_iterator_destroy(&it);
1554 string_list_clear(&skip, 0);
1555 strbuf_release(&errbuf);
1556 for (i = 0; i < logs_nr; i++) {
1557 if (!strcmp(logs[i].refname, "HEAD"))
1558 continue;
1559 if (logs[i].value.update.old_hash == old_ref.value.val1)
1560 logs[i].value.update.old_hash = NULL;
1561 if (logs[i].value.update.new_hash == old_ref.value.val1)
1562 logs[i].value.update.new_hash = NULL;
1563 logs[i].refname = NULL;
1564 reftable_log_record_release(&logs[i]);
1565 }
1566 free(logs);
1567 reftable_ref_record_release(&old_ref);
1568 reftable_log_record_release(&old_log);
1569 return ret;
1570 }
1571
1572 static int reftable_be_rename_ref(struct ref_store *ref_store,
1573 const char *oldrefname,
1574 const char *newrefname,
1575 const char *logmsg)
1576 {
1577 struct reftable_ref_store *refs =
1578 reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
1579 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1580 struct write_copy_arg arg = {
1581 .refs = refs,
1582 .stack = stack,
1583 .oldname = oldrefname,
1584 .newname = newrefname,
1585 .logmsg = logmsg,
1586 .delete_old = 1,
1587 };
1588 int ret;
1589
1590 ret = refs->err;
1591 if (ret < 0)
1592 goto done;
1593
1594 ret = reftable_stack_reload(stack);
1595 if (ret)
1596 goto done;
1597 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1598
1599 done:
1600 assert(ret != REFTABLE_API_ERROR);
1601 return ret;
1602 }
1603
1604 static int reftable_be_copy_ref(struct ref_store *ref_store,
1605 const char *oldrefname,
1606 const char *newrefname,
1607 const char *logmsg)
1608 {
1609 struct reftable_ref_store *refs =
1610 reftable_be_downcast(ref_store, REF_STORE_WRITE, "copy_ref");
1611 struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
1612 struct write_copy_arg arg = {
1613 .refs = refs,
1614 .stack = stack,
1615 .oldname = oldrefname,
1616 .newname = newrefname,
1617 .logmsg = logmsg,
1618 };
1619 int ret;
1620
1621 ret = refs->err;
1622 if (ret < 0)
1623 goto done;
1624
1625 ret = reftable_stack_reload(stack);
1626 if (ret)
1627 goto done;
1628 ret = reftable_stack_add(stack, &write_copy_table, &arg);
1629
1630 done:
1631 assert(ret != REFTABLE_API_ERROR);
1632 return ret;
1633 }
1634
1635 struct reftable_reflog_iterator {
1636 struct ref_iterator base;
1637 struct reftable_ref_store *refs;
1638 struct reftable_iterator iter;
1639 struct reftable_log_record log;
1640 struct object_id oid;
1641 char *last_name;
1642 int err;
1643 };
1644
1645 static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
1646 {
1647 struct reftable_reflog_iterator *iter =
1648 (struct reftable_reflog_iterator *)ref_iterator;
1649
1650 while (!iter->err) {
1651 int flags;
1652
1653 iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
1654 if (iter->err)
1655 break;
1656
1657 /*
1658 * We want the refnames that we have reflogs for, so we skip if
1659 * we've already produced this name. This could be faster by
1660 * seeking directly to reflog@update_index==0.
1661 */
1662 if (iter->last_name && !strcmp(iter->log.refname, iter->last_name))
1663 continue;
1664
1665 if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->log.refname,
1666 0, &iter->oid, &flags)) {
1667 error(_("bad ref for %s"), iter->log.refname);
1668 continue;
1669 }
1670
1671 free(iter->last_name);
1672 iter->last_name = xstrdup(iter->log.refname);
1673 iter->base.refname = iter->log.refname;
1674 iter->base.oid = &iter->oid;
1675 iter->base.flags = flags;
1676
1677 break;
1678 }
1679
1680 if (iter->err > 0) {
1681 if (ref_iterator_abort(ref_iterator) != ITER_DONE)
1682 return ITER_ERROR;
1683 return ITER_DONE;
1684 }
1685
1686 if (iter->err < 0) {
1687 ref_iterator_abort(ref_iterator);
1688 return ITER_ERROR;
1689 }
1690
1691 return ITER_OK;
1692 }
1693
1694 static int reftable_reflog_iterator_peel(struct ref_iterator *ref_iterator,
1695 struct object_id *peeled)
1696 {
1697 BUG("reftable reflog iterator cannot be peeled");
1698 return -1;
1699 }
1700
1701 static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
1702 {
1703 struct reftable_reflog_iterator *iter =
1704 (struct reftable_reflog_iterator *)ref_iterator;
1705 reftable_log_record_release(&iter->log);
1706 reftable_iterator_destroy(&iter->iter);
1707 free(iter->last_name);
1708 free(iter);
1709 return ITER_DONE;
1710 }
1711
1712 static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
1713 .advance = reftable_reflog_iterator_advance,
1714 .peel = reftable_reflog_iterator_peel,
1715 .abort = reftable_reflog_iterator_abort
1716 };
1717
1718 static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
1719 struct reftable_stack *stack)
1720 {
1721 struct reftable_merged_table *merged_table;
1722 struct reftable_reflog_iterator *iter;
1723 int ret;
1724
1725 iter = xcalloc(1, sizeof(*iter));
1726 base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable, 1);
1727 iter->refs = refs;
1728 iter->base.oid = &iter->oid;
1729
1730 ret = refs->err;
1731 if (ret)
1732 goto done;
1733
1734 ret = reftable_stack_reload(stack);
1735 if (ret < 0)
1736 goto done;
1737
1738 merged_table = reftable_stack_merged_table(stack);
1739
1740 ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
1741 if (ret < 0)
1742 goto done;
1743
1744 done:
1745 iter->err = ret;
1746 return iter;
1747 }
1748
1749 static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
1750 {
1751 struct reftable_ref_store *refs =
1752 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
1753 struct reftable_reflog_iterator *main_iter, *worktree_iter;
1754
1755 main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
1756 if (!refs->worktree_stack)
1757 return &main_iter->base;
1758
1759 worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);
1760
1761 return merge_ref_iterator_begin(1, &worktree_iter->base, &main_iter->base,
1762 iterator_select, NULL);
1763 }
1764
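/*
 * Convert a reftable log record into a call of the given
 * `each_reflog_ent_fn` callback, skipping the reflog existence marker.
 */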
1765 static int yield_log_record(struct reftable_log_record *log,
1766 each_reflog_ent_fn fn,
1767 void *cb_data)
1768 {
1769 struct object_id old_oid, new_oid;
1770 const char *full_committer;
1771
1772 oidread(&old_oid, log->value.update.old_hash);
1773 oidread(&new_oid, log->value.update.new_hash);
1774
1775 /*
1776 * When both the old object ID and the new object ID are null
1777 * then this is the reflog existence marker. It must not be
1778 * exposed to the caller.
1779 */
1780 if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
1781 return 0;
1782
1783 full_committer = fmt_ident(log->value.update.name, log->value.update.email,
1784 WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
1785 return fn(&old_oid, &new_oid, full_committer,
1786 log->value.update.time, log->value.update.tz_offset,
1787 log->value.update.message, cb_data);
1788 }
1789
1790 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
1791 const char *refname,
1792 each_reflog_ent_fn fn,
1793 void *cb_data)
1794 {
1795 struct reftable_ref_store *refs =
1796 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
1797 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1798 struct reftable_merged_table *mt = NULL;
1799 struct reftable_log_record log = {0};
1800 struct reftable_iterator it = {0};
1801 int ret;
1802
1803 if (refs->err < 0)
1804 return refs->err;
1805
1806 mt = reftable_stack_merged_table(stack);
1807 ret = reftable_merged_table_seek_log(mt, &it, refname);
1808 while (!ret) {
1809 ret = reftable_iterator_next_log(&it, &log);
1810 if (ret < 0)
1811 break;
1812 if (ret > 0 || strcmp(log.refname, refname)) {
1813 ret = 0;
1814 break;
1815 }
1816
1817 ret = yield_log_record(&log, fn, cb_data);
1818 if (ret)
1819 break;
1820 }
1821
1822 reftable_log_record_release(&log);
1823 reftable_iterator_destroy(&it);
1824 return ret;
1825 }
1826
1827 static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
1828 const char *refname,
1829 each_reflog_ent_fn fn,
1830 void *cb_data)
1831 {
1832 struct reftable_ref_store *refs =
1833 reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
1834 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1835 struct reftable_merged_table *mt = NULL;
1836 struct reftable_log_record *logs = NULL;
1837 struct reftable_iterator it = {0};
1838 size_t logs_alloc = 0, logs_nr = 0, i;
1839 int ret;
1840
1841 if (refs->err < 0)
1842 return refs->err;
1843
1844 mt = reftable_stack_merged_table(stack);
1845 ret = reftable_merged_table_seek_log(mt, &it, refname);
1846 while (!ret) {
1847 struct reftable_log_record log = {0};
1848
1849 ret = reftable_iterator_next_log(&it, &log);
1850 if (ret < 0)
1851 goto done;
1852 if (ret > 0 || strcmp(log.refname, refname)) {
1853 reftable_log_record_release(&log);
1854 ret = 0;
1855 break;
1856 }
1857
1858 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
1859 logs[logs_nr++] = log;
1860 }
1861
1862 for (i = logs_nr; i--;) {
1863 ret = yield_log_record(&logs[i], fn, cb_data);
1864 if (ret)
1865 goto done;
1866 }
1867
1868 done:
1869 reftable_iterator_destroy(&it);
1870 for (i = 0; i < logs_nr; i++)
1871 reftable_log_record_release(&logs[i]);
1872 free(logs);
1873 return ret;
1874 }
1875
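/*
 * Check whether a reflog exists for the given ref by seeking to its log
 * records and verifying that at least one record, which may be the reflog
 * existence marker, carries the ref's name.
 */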
1876 static int reftable_be_reflog_exists(struct ref_store *ref_store,
1877 const char *refname)
1878 {
1879 struct reftable_ref_store *refs =
1880 reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
1881 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1882 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
1883 struct reftable_log_record log = {0};
1884 struct reftable_iterator it = {0};
1885 int ret;
1886
1887 ret = refs->err;
1888 if (ret < 0)
1889 goto done;
1890
1891 ret = reftable_stack_reload(stack);
1892 if (ret < 0)
1893 goto done;
1894
1895 ret = reftable_merged_table_seek_log(mt, &it, refname);
1896 if (ret < 0)
1897 goto done;
1898
1899 /*
1900 * Check whether we get at least one log record for the given ref name.
1901 * If so, the reflog exists, otherwise it doesn't.
1902 */
1903 ret = reftable_iterator_next_log(&it, &log);
1904 if (ret < 0)
1905 goto done;
1906 if (ret > 0) {
1907 ret = 0;
1908 goto done;
1909 }
1910
1911 ret = strcmp(log.refname, refname) == 0;
1912
1913 done:
1914 reftable_iterator_destroy(&it);
1915 reftable_log_record_release(&log);
1916 if (ret < 0)
1917 ret = 0;
1918 return ret;
1919 }
1920
1921 struct write_reflog_existence_arg {
1922 struct reftable_ref_store *refs;
1923 const char *refname;
1924 struct reftable_stack *stack;
1925 };
1926
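/*
 * Table-writer callback that appends the reflog existence marker for the
 * given ref, but only if the ref does not have any log records yet.
 */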
1927 static int write_reflog_existence_table(struct reftable_writer *writer,
1928 void *cb_data)
1929 {
1930 struct write_reflog_existence_arg *arg = cb_data;
1931 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1932 struct reftable_log_record log = {0};
1933 int ret;
1934
1935 ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
1936 if (ret <= 0)
1937 goto done;
1938
1939 reftable_writer_set_limits(writer, ts, ts);
1940
1941 /*
1942 * The existence entry has both old and new object ID set to the
1943 * null object ID. Our iterators are aware of this and will not
1944 * present such entries to their callers.
1945 */
1946 log.refname = xstrdup(arg->refname);
1947 log.update_index = ts;
1948 log.value_type = REFTABLE_LOG_UPDATE;
1949 ret = reftable_writer_add_log(writer, &log);
1950
1951 done:
1952 assert(ret != REFTABLE_API_ERROR);
1953 reftable_log_record_release(&log);
1954 return ret;
1955 }
1956
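/*
 * Create a reflog for the given ref by adding a table that contains the
 * reflog existence marker to the ref's stack.
 */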
1957 static int reftable_be_create_reflog(struct ref_store *ref_store,
1958 const char *refname,
1959 struct strbuf *errmsg)
1960 {
1961 struct reftable_ref_store *refs =
1962 reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
1963 struct reftable_stack *stack = stack_for(refs, refname, &refname);
1964 struct write_reflog_existence_arg arg = {
1965 .refs = refs,
1966 .stack = stack,
1967 .refname = refname,
1968 };
1969 int ret;
1970
1971 ret = refs->err;
1972 if (ret < 0)
1973 goto done;
1974
1975 ret = reftable_stack_reload(stack);
1976 if (ret)
1977 goto done;
1978
1979 ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);
1980
1981 done:
1982 return ret;
1983 }
1984
1985 struct write_reflog_delete_arg {
1986 struct reftable_stack *stack;
1987 const char *refname;
1988 };
1989
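/*
 * Table-writer callback that appends a tombstone record for every log entry
 * of the given ref, which effectively deletes its reflog.
 */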
1990 static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
1991 {
1992 struct write_reflog_delete_arg *arg = cb_data;
1993 struct reftable_merged_table *mt =
1994 reftable_stack_merged_table(arg->stack);
1995 struct reftable_log_record log = {0}, tombstone = {0};
1996 struct reftable_iterator it = {0};
1997 uint64_t ts = reftable_stack_next_update_index(arg->stack);
1998 int ret;
1999
2000 reftable_writer_set_limits(writer, ts, ts);
2001
2002 /*
2003 * In order to delete a reflog we have to write tombstones for all of
2004 * its entries one by one. This is inefficient, but the reftable format
2005 * does not have a better deletion marker right now.
2006 */
2007 ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
2008 while (ret == 0) {
2009 ret = reftable_iterator_next_log(&it, &log);
2010 if (ret < 0)
2011 break;
2012 if (ret > 0 || strcmp(log.refname, arg->refname)) {
2013 ret = 0;
2014 break;
2015 }
2016
2017 tombstone.refname = (char *)arg->refname;
2018 tombstone.value_type = REFTABLE_LOG_DELETION;
2019 tombstone.update_index = log.update_index;
2020
2021 ret = reftable_writer_add_log(writer, &tombstone);
2022 }
2023
2024 reftable_log_record_release(&log);
2025 reftable_iterator_destroy(&it);
2026 return ret;
2027 }
2028
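/*
 * Delete the reflog of the given ref by writing a new table that contains
 * tombstones for all of its entries.
 */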
2029 static int reftable_be_delete_reflog(struct ref_store *ref_store,
2030 const char *refname)
2031 {
2032 struct reftable_ref_store *refs =
2033 reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
2034 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2035 struct write_reflog_delete_arg arg = {
2036 .stack = stack,
2037 .refname = refname,
2038 };
2039 int ret;
2040
2041 ret = reftable_stack_reload(stack);
2042 if (ret)
2043 return ret;
2044 ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);
2045
2046 assert(ret != REFTABLE_API_ERROR);
2047 return ret;
2048 }
2049
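/*
 * State passed to write_reflog_expiry_table(): the rewritten log records
 * that shall replace the ref's reflog, the object ID the ref itself shall be
 * updated to (the null object ID if it shall be left alone), as well as the
 * stack and refname they belong to.
 */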
2050 struct reflog_expiry_arg {
2051 struct reftable_stack *stack;
2052 struct reftable_log_record *records;
2053 struct object_id update_oid;
2054 const char *refname;
2055 size_t len;
2056 };
2057
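/*
 * Table-writer callback that commits the result of a reflog expiry: it
 * writes the rewritten log records, optionally updates the ref to the value
 * of the last surviving entry, and adds back the reflog existence marker in
 * case no live entries remain.
 */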
2058 static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
2059 {
2060 struct reflog_expiry_arg *arg = cb_data;
2061 uint64_t ts = reftable_stack_next_update_index(arg->stack);
2062 uint64_t live_records = 0;
2063 size_t i;
2064 int ret;
2065
2066 for (i = 0; i < arg->len; i++)
2067 if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
2068 live_records++;
2069
2070 reftable_writer_set_limits(writer, ts, ts);
2071
2072 if (!is_null_oid(&arg->update_oid)) {
2073 struct reftable_ref_record ref = {0};
2074 struct object_id peeled;
2075
2076 ref.refname = (char *)arg->refname;
2077 ref.update_index = ts;
2078
2079 if (!peel_object(&arg->update_oid, &peeled)) {
2080 ref.value_type = REFTABLE_REF_VAL2;
2081 memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
2082 memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
2083 } else {
2084 ref.value_type = REFTABLE_REF_VAL1;
2085 memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
2086 }
2087
2088 ret = reftable_writer_add_ref(writer, &ref);
2089 if (ret < 0)
2090 return ret;
2091 }
2092
2093 /*
2094 * When there are no live entries left in the reflog we still write a
2095 * placeholder reflog entry to indicate that the reflog continues to
2096 * exist.
2097 */
2098 if (!live_records) {
2099 struct reftable_log_record log = {
2100 .refname = (char *)arg->refname,
2101 .value_type = REFTABLE_LOG_UPDATE,
2102 .update_index = ts,
2103 };
2104
2105 ret = reftable_writer_add_log(writer, &log);
2106 if (ret)
2107 return ret;
2108 }
2109
2110 for (i = 0; i < arg->len; i++) {
2111 ret = reftable_writer_add_log(writer, &arg->records[i]);
2112 if (ret)
2113 return ret;
2114 }
2115
2116 return 0;
2117 }
2118
2119 static int reftable_be_reflog_expire(struct ref_store *ref_store,
2120 const char *refname,
2121 unsigned int flags,
2122 reflog_expiry_prepare_fn prepare_fn,
2123 reflog_expiry_should_prune_fn should_prune_fn,
2124 reflog_expiry_cleanup_fn cleanup_fn,
2125 void *policy_cb_data)
2126 {
2127 /*
2128 * For log expiry, we write tombstones for every single reflog entry
2129 * that is to be expired. This means that the entries are still
2130 * retrievable by delving into the stack, and expiring entries
2131 * paradoxically takes extra memory. This memory is only reclaimed when
2132 * compacting the reftable stack.
2133 *
2134 * It would be better if the refs backend supported an API that sets a
2135 * criterion for all refs, passing the criterion to pack_refs().
2136 *
2137 * On the plus side, because we do the expiration per ref, we can easily
2138 * insert the reflog existence dummies.
2139 */
2140 struct reftable_ref_store *refs =
2141 reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
2142 struct reftable_stack *stack = stack_for(refs, refname, &refname);
2143 struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
2144 struct reftable_log_record *logs = NULL;
2145 struct reftable_log_record *rewritten = NULL;
2146 struct reftable_ref_record ref_record = {0};
2147 struct reftable_iterator it = {0};
2148 struct reftable_addition *add = NULL;
2149 struct reflog_expiry_arg arg = {0};
2150 struct object_id oid = {0};
2151 uint8_t *last_hash = NULL;
2152 size_t logs_nr = 0, logs_alloc = 0, i;
2153 int ret;
2154
2155 if (refs->err < 0)
2156 return refs->err;
2157
2158 ret = reftable_stack_reload(stack);
2159 if (ret < 0)
2160 goto done;
2161
2162 ret = reftable_merged_table_seek_log(mt, &it, refname);
2163 if (ret < 0)
2164 goto done;
2165
2166 ret = reftable_stack_new_addition(&add, stack);
2167 if (ret < 0)
2168 goto done;
2169
2170 ret = reftable_stack_read_ref(stack, refname, &ref_record);
2171 if (ret < 0)
2172 goto done;
2173 if (reftable_ref_record_val1(&ref_record))
2174 oidread(&oid, reftable_ref_record_val1(&ref_record));
2175 prepare_fn(refname, &oid, policy_cb_data);
2176
2177 while (1) {
2178 struct reftable_log_record log = {0};
2179 struct object_id old_oid, new_oid;
2180
2181 ret = reftable_iterator_next_log(&it, &log);
2182 if (ret < 0)
2183 goto done;
2184 if (ret > 0 || strcmp(log.refname, refname)) {
2185 reftable_log_record_release(&log);
2186 break;
2187 }
2188
2189 oidread(&old_oid, log.value.update.old_hash);
2190 oidread(&new_oid, log.value.update.new_hash);
2191
2192 /*
2193 * Skip over the reflog existence marker. We will add it back
2194 * in when there are no live reflog records.
2195 */
2196 if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
2197 reftable_log_record_release(&log);
2198 continue;
2199 }
2200
2201 ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
2202 logs[logs_nr++] = log;
2203 }
2204
2205 /*
2206 * We need to rewrite all reflog entries according to the pruning
2207 * callback function:
2208 *
2209 * - If a reflog entry shall be pruned we mark the record for
2210 * deletion.
2211 *
2212 * - Otherwise we may have to rewrite the chain of reflog entries so
2213 * that gaps created by just-deleted records get backfilled.
2214 */
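/*
 * For example, with entries A->B, B->C and C->D (oldest to newest),
 * pruning B->C under EXPIRE_REFLOGS_REWRITE yields A->B and B->D: the
 * old value of the next surviving entry is backfilled with the new
 * value of the last entry that was kept.
 */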
2215 CALLOC_ARRAY(rewritten, logs_nr);
2216 for (i = logs_nr; i--;) {
2217 struct reftable_log_record *dest = &rewritten[i];
2218 struct object_id old_oid, new_oid;
2219
2220 *dest = logs[i];
2221 oidread(&old_oid, logs[i].value.update.old_hash);
2222 oidread(&new_oid, logs[i].value.update.new_hash);
2223
2224 if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
2225 (timestamp_t)logs[i].value.update.time,
2226 logs[i].value.update.tz_offset,
2227 logs[i].value.update.message,
2228 policy_cb_data)) {
2229 dest->value_type = REFTABLE_LOG_DELETION;
2230 } else {
2231 if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
2232 dest->value.update.old_hash = last_hash;
2233 last_hash = logs[i].value.update.new_hash;
2234 }
2235 }
2236
2237 if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash &&
2238 reftable_ref_record_val1(&ref_record))
2239 oidread(&arg.update_oid, last_hash);
2240
2241 arg.records = rewritten;
2242 arg.len = logs_nr;
2243 arg.stack = stack;
2244 arg.refname = refname;
2245
2246 ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
2247 if (ret < 0)
2248 goto done;
2249
2250 /*
2251 * Future improvement: we could skip writing records that were
2252 * not changed.
2253 */
2254 if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
2255 ret = reftable_addition_commit(add);
2256
2257 done:
2258 if (add)
2259 cleanup_fn(policy_cb_data);
2260 assert(ret != REFTABLE_API_ERROR);
2261
2262 reftable_ref_record_release(&ref_record);
2263 reftable_iterator_destroy(&it);
2264 reftable_addition_destroy(add);
2265 for (i = 0; i < logs_nr; i++)
2266 reftable_log_record_release(&logs[i]);
2267 free(logs);
2268 free(rewritten);
2269 return ret;
2270 }
2271
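/*
 * The virtual function table that wires up the reftable backend as ref
 * storage format "reftable".
 */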
2272 struct ref_storage_be refs_be_reftable = {
2273 .name = "reftable",
2274 .init = reftable_be_init,
2275 .init_db = reftable_be_init_db,
2276 .transaction_prepare = reftable_be_transaction_prepare,
2277 .transaction_finish = reftable_be_transaction_finish,
2278 .transaction_abort = reftable_be_transaction_abort,
2279 .initial_transaction_commit = reftable_be_initial_transaction_commit,
2280
2281 .pack_refs = reftable_be_pack_refs,
2282 .create_symref = reftable_be_create_symref,
2283 .rename_ref = reftable_be_rename_ref,
2284 .copy_ref = reftable_be_copy_ref,
2285
2286 .iterator_begin = reftable_be_iterator_begin,
2287 .read_raw_ref = reftable_be_read_raw_ref,
2288 .read_symbolic_ref = reftable_be_read_symbolic_ref,
2289
2290 .reflog_iterator_begin = reftable_be_reflog_iterator_begin,
2291 .for_each_reflog_ent = reftable_be_for_each_reflog_ent,
2292 .for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
2293 .reflog_exists = reftable_be_reflog_exists,
2294 .create_reflog = reftable_be_create_reflog,
2295 .delete_reflog = reftable_be_delete_reflog,
2296 .reflog_expire = reftable_be_reflog_expire,
2297 };