1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
4 #include "../environment.h"
5 #include "../gettext.h"
8 #include "../iterator.h"
10 #include "../lockfile.h"
11 #include "../object.h"
14 #include "../reftable/reftable-stack.h"
15 #include "../reftable/reftable-record.h"
16 #include "../reftable/reftable-error.h"
17 #include "../reftable/reftable-iterator.h"
18 #include "../reftable/reftable-merged.h"
20 #include "../strmap.h"
21 #include "refs-internal.h"
/*
 * Used as a flag in ref_update::flags when the ref_update was via an
 * update to HEAD.
 */
27 #define REF_UPDATE_VIA_HEAD (1 << 8)
29 struct reftable_ref_store
{
30 struct ref_store base
;
33 * The main stack refers to the common dir and thus contains common
34 * refs as well as refs of the main repository.
36 struct reftable_stack
*main_stack
;
38 * The worktree stack refers to the gitdir in case the refdb is opened
39 * via a worktree. It thus contains the per-worktree refs.
41 struct reftable_stack
*worktree_stack
;
43 * Map of worktree stacks by their respective worktree names. The map
44 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
46 struct strmap worktree_stacks
;
47 struct reftable_write_options write_options
;
49 unsigned int store_flags
;
54 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
55 * reftable_ref_store. required_flags is compared with ref_store's store_flags
56 * to ensure the ref_store has all required capabilities. "caller" is used in
57 * any necessary error messages.
59 static struct reftable_ref_store
*reftable_be_downcast(struct ref_store
*ref_store
,
60 unsigned int required_flags
,
63 struct reftable_ref_store
*refs
;
65 if (ref_store
->be
!= &refs_be_reftable
)
66 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
67 ref_store
->be
->name
, caller
);
69 refs
= (struct reftable_ref_store
*)ref_store
;
71 if ((refs
->store_flags
& required_flags
) != required_flags
)
72 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
73 caller
, required_flags
, refs
->store_flags
);
79 * Some refs are global to the repository (refs/heads/{*}), while others are
80 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
81 * multiple separate databases (ie. multiple reftable/ directories), one for
82 * the shared refs, one for the current worktree refs, and one for each
83 * additional worktree. For reading, we merge the view of both the shared and
84 * the current worktree's refs, when necessary.
86 * This function also optionally assigns the rewritten reference name that is
87 * local to the stack. This translation is required when using worktree refs
88 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
89 * those references in their normalized form.
91 static struct reftable_stack
*stack_for(struct reftable_ref_store
*store
,
93 const char **rewritten_ref
)
99 return store
->main_stack
;
101 switch (parse_worktree_ref(refname
, &wtname
, &wtname_len
, rewritten_ref
)) {
102 case REF_WORKTREE_OTHER
: {
103 static struct strbuf wtname_buf
= STRBUF_INIT
;
104 struct strbuf wt_dir
= STRBUF_INIT
;
105 struct reftable_stack
*stack
;
108 * We're using a static buffer here so that we don't need to
109 * allocate the worktree name whenever we look up a reference.
110 * This could be avoided if the strmap interface knew how to
111 * handle keys with a length.
113 strbuf_reset(&wtname_buf
);
114 strbuf_add(&wtname_buf
, wtname
, wtname_len
);
117 * There is an edge case here: when the worktree references the
118 * current worktree, then we set up the stack once via
119 * `worktree_stacks` and once via `worktree_stack`. This is
120 * wasteful, but in the reading case it shouldn't matter. And
121 * in the writing case we would notice that the stack is locked
122 * already and error out when trying to write a reference via
125 stack
= strmap_get(&store
->worktree_stacks
, wtname_buf
.buf
);
127 strbuf_addf(&wt_dir
, "%s/worktrees/%s/reftable",
128 store
->base
.repo
->commondir
, wtname_buf
.buf
);
130 store
->err
= reftable_new_stack(&stack
, wt_dir
.buf
,
131 store
->write_options
);
132 assert(store
->err
!= REFTABLE_API_ERROR
);
133 strmap_put(&store
->worktree_stacks
, wtname_buf
.buf
, stack
);
136 strbuf_release(&wt_dir
);
139 case REF_WORKTREE_CURRENT
:
141 * If there is no worktree stack then we're currently in the
142 * main worktree. We thus return the main stack in that case.
144 if (!store
->worktree_stack
)
145 return store
->main_stack
;
146 return store
->worktree_stack
;
147 case REF_WORKTREE_MAIN
:
148 case REF_WORKTREE_SHARED
:
149 return store
->main_stack
;
151 BUG("unhandled worktree reference type");
155 static int should_write_log(struct ref_store
*refs
, const char *refname
)
157 if (log_all_ref_updates
== LOG_REFS_UNSET
)
158 log_all_ref_updates
= is_bare_repository() ? LOG_REFS_NONE
: LOG_REFS_NORMAL
;
160 switch (log_all_ref_updates
) {
162 return refs_reflog_exists(refs
, refname
);
163 case LOG_REFS_ALWAYS
:
165 case LOG_REFS_NORMAL
:
166 if (should_autocreate_reflog(refname
))
168 return refs_reflog_exists(refs
, refname
);
170 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates
);
174 static void clear_reftable_log_record(struct reftable_log_record
*log
)
176 switch (log
->value_type
) {
177 case REFTABLE_LOG_UPDATE
:
179 * When we write log records, the hashes are owned by the
180 * caller and thus shouldn't be free'd.
182 log
->value
.update
.old_hash
= NULL
;
183 log
->value
.update
.new_hash
= NULL
;
185 case REFTABLE_LOG_DELETION
:
188 reftable_log_record_release(log
);
191 static void fill_reftable_log_record(struct reftable_log_record
*log
)
193 const char *info
= git_committer_info(0);
194 struct ident_split split
= {0};
197 if (split_ident_line(&split
, info
, strlen(info
)))
198 BUG("failed splitting committer info");
200 reftable_log_record_release(log
);
201 log
->value_type
= REFTABLE_LOG_UPDATE
;
202 log
->value
.update
.name
=
203 xstrndup(split
.name_begin
, split
.name_end
- split
.name_begin
);
204 log
->value
.update
.email
=
205 xstrndup(split
.mail_begin
, split
.mail_end
- split
.mail_begin
);
206 log
->value
.update
.time
= atol(split
.date_begin
);
207 if (*split
.tz_begin
== '-') {
211 if (*split
.tz_begin
== '+') {
216 log
->value
.update
.tz_offset
= sign
* atoi(split
.tz_begin
);
219 static int read_ref_without_reload(struct reftable_stack
*stack
,
221 struct object_id
*oid
,
222 struct strbuf
*referent
,
225 struct reftable_ref_record ref
= {0};
228 ret
= reftable_stack_read_ref(stack
, refname
, &ref
);
232 if (ref
.value_type
== REFTABLE_REF_SYMREF
) {
233 strbuf_reset(referent
);
234 strbuf_addstr(referent
, ref
.value
.symref
);
235 *type
|= REF_ISSYMREF
;
236 } else if (reftable_ref_record_val1(&ref
)) {
237 oidread(oid
, reftable_ref_record_val1(&ref
));
239 /* We got a tombstone, which should not happen. */
240 BUG("unhandled reference value type %d", ref
.value_type
);
244 assert(ret
!= REFTABLE_API_ERROR
);
245 reftable_ref_record_release(&ref
);
249 static struct ref_store
*reftable_be_init(struct repository
*repo
,
251 unsigned int store_flags
)
253 struct reftable_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
254 struct strbuf path
= STRBUF_INIT
;
261 base_ref_store_init(&refs
->base
, repo
, gitdir
, &refs_be_reftable
);
262 strmap_init(&refs
->worktree_stacks
);
263 refs
->store_flags
= store_flags
;
264 refs
->write_options
.block_size
= 4096;
265 refs
->write_options
.hash_id
= repo
->hash_algo
->format_id
;
266 refs
->write_options
.default_permissions
= calc_shared_perm(0666 & ~mask
);
269 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
270 * This stack contains both the shared and the main worktree refs.
272 * Note that we don't try to resolve the path in case we have a
273 * worktree because `get_common_dir_noenv()` already does it for us.
275 is_worktree
= get_common_dir_noenv(&path
, gitdir
);
278 strbuf_realpath(&path
, gitdir
, 0);
280 strbuf_addstr(&path
, "/reftable");
281 refs
->err
= reftable_new_stack(&refs
->main_stack
, path
.buf
,
282 refs
->write_options
);
287 * If we're in a worktree we also need to set up the worktree reftable
288 * stack that is contained in the per-worktree GIT_DIR.
290 * Ideally, we would also add the stack to our worktree stack map. But
291 * we have no way to figure out the worktree name here and thus can't
296 strbuf_addf(&path
, "%s/reftable", gitdir
);
298 refs
->err
= reftable_new_stack(&refs
->worktree_stack
, path
.buf
,
299 refs
->write_options
);
304 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs
->base
.gitdir
);
307 assert(refs
->err
!= REFTABLE_API_ERROR
);
308 strbuf_release(&path
);
312 static int reftable_be_init_db(struct ref_store
*ref_store
,
314 struct strbuf
*err UNUSED
)
316 struct reftable_ref_store
*refs
=
317 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "init_db");
318 struct strbuf sb
= STRBUF_INIT
;
320 strbuf_addf(&sb
, "%s/reftable", refs
->base
.gitdir
);
321 safe_create_dir(sb
.buf
, 1);
324 strbuf_addf(&sb
, "%s/HEAD", refs
->base
.gitdir
);
325 write_file(sb
.buf
, "ref: refs/heads/.invalid");
326 adjust_shared_perm(sb
.buf
);
329 strbuf_addf(&sb
, "%s/refs", refs
->base
.gitdir
);
330 safe_create_dir(sb
.buf
, 1);
333 strbuf_addf(&sb
, "%s/refs/heads", refs
->base
.gitdir
);
334 write_file(sb
.buf
, "this repository uses the reftable format");
335 adjust_shared_perm(sb
.buf
);
341 struct reftable_ref_iterator
{
342 struct ref_iterator base
;
343 struct reftable_ref_store
*refs
;
344 struct reftable_iterator iter
;
345 struct reftable_ref_record ref
;
346 struct object_id oid
;
353 static int reftable_ref_iterator_advance(struct ref_iterator
*ref_iterator
)
355 struct reftable_ref_iterator
*iter
=
356 (struct reftable_ref_iterator
*)ref_iterator
;
357 struct reftable_ref_store
*refs
= iter
->refs
;
362 iter
->err
= reftable_iterator_next_ref(&iter
->iter
, &iter
->ref
);
367 * The files backend only lists references contained in
368 * "refs/". We emulate the same behaviour here and thus skip
369 * all references that don't start with this prefix.
371 if (!starts_with(iter
->ref
.refname
, "refs/"))
375 strncmp(iter
->prefix
, iter
->ref
.refname
, strlen(iter
->prefix
))) {
380 if (iter
->flags
& DO_FOR_EACH_PER_WORKTREE_ONLY
&&
381 parse_worktree_ref(iter
->ref
.refname
, NULL
, NULL
, NULL
) !=
382 REF_WORKTREE_CURRENT
)
385 switch (iter
->ref
.value_type
) {
386 case REFTABLE_REF_VAL1
:
387 oidread(&iter
->oid
, iter
->ref
.value
.val1
);
389 case REFTABLE_REF_VAL2
:
390 oidread(&iter
->oid
, iter
->ref
.value
.val2
.value
);
392 case REFTABLE_REF_SYMREF
:
393 if (!refs_resolve_ref_unsafe(&iter
->refs
->base
, iter
->ref
.refname
,
394 RESOLVE_REF_READING
, &iter
->oid
, &flags
))
398 BUG("unhandled reference value type %d", iter
->ref
.value_type
);
401 if (is_null_oid(&iter
->oid
))
402 flags
|= REF_ISBROKEN
;
404 if (check_refname_format(iter
->ref
.refname
, REFNAME_ALLOW_ONELEVEL
)) {
405 if (!refname_is_safe(iter
->ref
.refname
))
406 die(_("refname is dangerous: %s"), iter
->ref
.refname
);
408 flags
|= REF_BAD_NAME
| REF_ISBROKEN
;
411 if (iter
->flags
& DO_FOR_EACH_OMIT_DANGLING_SYMREFS
&&
412 flags
& REF_ISSYMREF
&&
413 flags
& REF_ISBROKEN
)
416 if (!(iter
->flags
& DO_FOR_EACH_INCLUDE_BROKEN
) &&
417 !ref_resolves_to_object(iter
->ref
.refname
, refs
->base
.repo
,
421 iter
->base
.refname
= iter
->ref
.refname
;
422 iter
->base
.oid
= &iter
->oid
;
423 iter
->base
.flags
= flags
;
429 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
435 ref_iterator_abort(ref_iterator
);
442 static int reftable_ref_iterator_peel(struct ref_iterator
*ref_iterator
,
443 struct object_id
*peeled
)
445 struct reftable_ref_iterator
*iter
=
446 (struct reftable_ref_iterator
*)ref_iterator
;
448 if (iter
->ref
.value_type
== REFTABLE_REF_VAL2
) {
449 oidread(peeled
, iter
->ref
.value
.val2
.target_value
);
456 static int reftable_ref_iterator_abort(struct ref_iterator
*ref_iterator
)
458 struct reftable_ref_iterator
*iter
=
459 (struct reftable_ref_iterator
*)ref_iterator
;
460 reftable_ref_record_release(&iter
->ref
);
461 reftable_iterator_destroy(&iter
->iter
);
466 static struct ref_iterator_vtable reftable_ref_iterator_vtable
= {
467 .advance
= reftable_ref_iterator_advance
,
468 .peel
= reftable_ref_iterator_peel
,
469 .abort
= reftable_ref_iterator_abort
472 static struct reftable_ref_iterator
*ref_iterator_for_stack(struct reftable_ref_store
*refs
,
473 struct reftable_stack
*stack
,
477 struct reftable_merged_table
*merged_table
;
478 struct reftable_ref_iterator
*iter
;
481 iter
= xcalloc(1, sizeof(*iter
));
482 base_ref_iterator_init(&iter
->base
, &reftable_ref_iterator_vtable
, 1);
483 iter
->prefix
= prefix
;
484 iter
->base
.oid
= &iter
->oid
;
492 ret
= reftable_stack_reload(stack
);
496 merged_table
= reftable_stack_merged_table(stack
);
498 ret
= reftable_merged_table_seek_ref(merged_table
, &iter
->iter
, prefix
);
507 static enum iterator_selection
iterator_select(struct ref_iterator
*iter_worktree
,
508 struct ref_iterator
*iter_common
,
509 void *cb_data UNUSED
)
511 if (iter_worktree
&& !iter_common
) {
513 * Return the worktree ref if there are no more common refs.
515 return ITER_SELECT_0
;
516 } else if (iter_common
) {
518 * In case we have pending worktree and common refs we need to
519 * yield them based on their lexicographical order. Worktree
520 * refs that have the same name as common refs shadow the
524 int cmp
= strcmp(iter_worktree
->refname
,
525 iter_common
->refname
);
527 return ITER_SELECT_0
;
529 return ITER_SELECT_0_SKIP_1
;
533 * We now know that the lexicographically-next ref is a common
534 * ref. When the common ref is a shared one we return it.
536 if (parse_worktree_ref(iter_common
->refname
, NULL
, NULL
,
537 NULL
) == REF_WORKTREE_SHARED
)
538 return ITER_SELECT_1
;
541 * Otherwise, if the common ref is a per-worktree ref we skip
542 * it because it would belong to the main worktree, not ours.
550 static struct ref_iterator
*reftable_be_iterator_begin(struct ref_store
*ref_store
,
552 const char **exclude_patterns
,
555 struct reftable_ref_iterator
*main_iter
, *worktree_iter
;
556 struct reftable_ref_store
*refs
;
557 unsigned int required_flags
= REF_STORE_READ
;
559 if (!(flags
& DO_FOR_EACH_INCLUDE_BROKEN
))
560 required_flags
|= REF_STORE_ODB
;
561 refs
= reftable_be_downcast(ref_store
, required_flags
, "ref_iterator_begin");
563 main_iter
= ref_iterator_for_stack(refs
, refs
->main_stack
, prefix
, flags
);
566 * The worktree stack is only set when we're in an actual worktree
567 * right now. If we aren't, then we return the common reftable
570 if (!refs
->worktree_stack
)
571 return &main_iter
->base
;
574 * Otherwise we merge both the common and the per-worktree refs into a
577 worktree_iter
= ref_iterator_for_stack(refs
, refs
->worktree_stack
, prefix
, flags
);
578 return merge_ref_iterator_begin(1, &worktree_iter
->base
, &main_iter
->base
,
579 iterator_select
, NULL
);
582 static int reftable_be_read_raw_ref(struct ref_store
*ref_store
,
584 struct object_id
*oid
,
585 struct strbuf
*referent
,
589 struct reftable_ref_store
*refs
=
590 reftable_be_downcast(ref_store
, REF_STORE_READ
, "read_raw_ref");
591 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
597 ret
= reftable_stack_reload(stack
);
601 ret
= read_ref_without_reload(stack
, refname
, oid
, referent
, type
);
605 *failure_errno
= ENOENT
;
612 static int reftable_be_read_symbolic_ref(struct ref_store
*ref_store
,
614 struct strbuf
*referent
)
616 struct reftable_ref_store
*refs
=
617 reftable_be_downcast(ref_store
, REF_STORE_READ
, "read_symbolic_ref");
618 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
619 struct reftable_ref_record ref
= {0};
622 ret
= reftable_stack_reload(stack
);
626 ret
= reftable_stack_read_ref(stack
, refname
, &ref
);
627 if (ret
== 0 && ref
.value_type
== REFTABLE_REF_SYMREF
)
628 strbuf_addstr(referent
, ref
.value
.symref
);
632 reftable_ref_record_release(&ref
);
637 * Return the refname under which update was originally requested.
639 static const char *original_update_refname(struct ref_update
*update
)
641 while (update
->parent_update
)
642 update
= update
->parent_update
;
643 return update
->refname
;
646 struct reftable_transaction_update
{
647 struct ref_update
*update
;
648 struct object_id current_oid
;
651 struct write_transaction_table_arg
{
652 struct reftable_ref_store
*refs
;
653 struct reftable_stack
*stack
;
654 struct reftable_addition
*addition
;
655 struct reftable_transaction_update
*updates
;
657 size_t updates_alloc
;
658 size_t updates_expected
;
661 struct reftable_transaction_data
{
662 struct write_transaction_table_arg
*args
;
663 size_t args_nr
, args_alloc
;
666 static void free_transaction_data(struct reftable_transaction_data
*tx_data
)
670 for (size_t i
= 0; i
< tx_data
->args_nr
; i
++) {
671 reftable_addition_destroy(tx_data
->args
[i
].addition
);
672 free(tx_data
->args
[i
].updates
);
679 * Prepare transaction update for the given reference update. This will cause
680 * us to lock the corresponding reftable stack for concurrent modification.
682 static int prepare_transaction_update(struct write_transaction_table_arg
**out
,
683 struct reftable_ref_store
*refs
,
684 struct reftable_transaction_data
*tx_data
,
685 struct ref_update
*update
,
688 struct reftable_stack
*stack
= stack_for(refs
, update
->refname
, NULL
);
689 struct write_transaction_table_arg
*arg
= NULL
;
694 * Search for a preexisting stack update. If there is one then we add
695 * the update to it, otherwise we set up a new stack update.
697 for (i
= 0; !arg
&& i
< tx_data
->args_nr
; i
++)
698 if (tx_data
->args
[i
].stack
== stack
)
699 arg
= &tx_data
->args
[i
];
702 struct reftable_addition
*addition
;
704 ret
= reftable_stack_reload(stack
);
708 ret
= reftable_stack_new_addition(&addition
, stack
);
710 if (ret
== REFTABLE_LOCK_ERROR
)
711 strbuf_addstr(err
, "cannot lock references");
715 ALLOC_GROW(tx_data
->args
, tx_data
->args_nr
+ 1,
716 tx_data
->args_alloc
);
717 arg
= &tx_data
->args
[tx_data
->args_nr
++];
720 arg
->addition
= addition
;
723 arg
->updates_alloc
= 0;
724 arg
->updates_expected
= 0;
727 arg
->updates_expected
++;
736 * Queue a reference update for the correct stack. We potentially need to
737 * handle multiple stack updates in a single transaction when it spans across
738 * multiple worktrees.
740 static int queue_transaction_update(struct reftable_ref_store
*refs
,
741 struct reftable_transaction_data
*tx_data
,
742 struct ref_update
*update
,
743 struct object_id
*current_oid
,
746 struct write_transaction_table_arg
*arg
= NULL
;
749 if (update
->backend_data
)
750 BUG("reference update queued more than once");
752 ret
= prepare_transaction_update(&arg
, refs
, tx_data
, update
, err
);
756 ALLOC_GROW(arg
->updates
, arg
->updates_nr
+ 1,
758 arg
->updates
[arg
->updates_nr
].update
= update
;
759 oidcpy(&arg
->updates
[arg
->updates_nr
].current_oid
, current_oid
);
760 update
->backend_data
= &arg
->updates
[arg
->updates_nr
++];
765 static int reftable_be_transaction_prepare(struct ref_store
*ref_store
,
766 struct ref_transaction
*transaction
,
769 struct reftable_ref_store
*refs
=
770 reftable_be_downcast(ref_store
, REF_STORE_WRITE
|REF_STORE_MAIN
, "ref_transaction_prepare");
771 struct strbuf referent
= STRBUF_INIT
, head_referent
= STRBUF_INIT
;
772 struct string_list affected_refnames
= STRING_LIST_INIT_NODUP
;
773 struct reftable_transaction_data
*tx_data
= NULL
;
774 struct object_id head_oid
;
775 unsigned int head_type
= 0;
783 tx_data
= xcalloc(1, sizeof(*tx_data
));
786 * Preprocess all updates. For one we check that there are no duplicate
787 * reference updates in this transaction. Second, we lock all stacks
788 * that will be modified during the transaction.
790 for (i
= 0; i
< transaction
->nr
; i
++) {
791 ret
= prepare_transaction_update(NULL
, refs
, tx_data
,
792 transaction
->updates
[i
], err
);
796 string_list_append(&affected_refnames
,
797 transaction
->updates
[i
]->refname
);
801 * Now that we have counted updates per stack we can preallocate their
802 * arrays. This avoids having to reallocate many times.
804 for (i
= 0; i
< tx_data
->args_nr
; i
++) {
805 CALLOC_ARRAY(tx_data
->args
[i
].updates
, tx_data
->args
[i
].updates_expected
);
806 tx_data
->args
[i
].updates_alloc
= tx_data
->args
[i
].updates_expected
;
810 * Fail if a refname appears more than once in the transaction.
811 * This code is taken from the files backend and is a good candidate to
812 * be moved into the generic layer.
814 string_list_sort(&affected_refnames
);
815 if (ref_update_reject_duplicates(&affected_refnames
, err
)) {
816 ret
= TRANSACTION_GENERIC_ERROR
;
820 ret
= read_ref_without_reload(stack_for(refs
, "HEAD", NULL
), "HEAD", &head_oid
,
821 &head_referent
, &head_type
);
825 for (i
= 0; i
< transaction
->nr
; i
++) {
826 struct ref_update
*u
= transaction
->updates
[i
];
827 struct object_id current_oid
= {0};
828 struct reftable_stack
*stack
;
829 const char *rewritten_ref
;
831 stack
= stack_for(refs
, u
->refname
, &rewritten_ref
);
833 /* Verify that the new object ID is valid. */
834 if ((u
->flags
& REF_HAVE_NEW
) && !is_null_oid(&u
->new_oid
) &&
835 !(u
->flags
& REF_SKIP_OID_VERIFICATION
) &&
836 !(u
->flags
& REF_LOG_ONLY
)) {
837 struct object
*o
= parse_object(refs
->base
.repo
, &u
->new_oid
);
840 _("trying to write ref '%s' with nonexistent object %s"),
841 u
->refname
, oid_to_hex(&u
->new_oid
));
846 if (o
->type
!= OBJ_COMMIT
&& is_branch(u
->refname
)) {
847 strbuf_addf(err
, _("trying to write non-commit object %s to branch '%s'"),
848 oid_to_hex(&u
->new_oid
), u
->refname
);
855 * When we update the reference that HEAD points to we enqueue
856 * a second log-only update for HEAD so that its reflog is
857 * updated accordingly.
859 if (head_type
== REF_ISSYMREF
&&
860 !(u
->flags
& REF_LOG_ONLY
) &&
861 !(u
->flags
& REF_UPDATE_VIA_HEAD
) &&
862 !strcmp(rewritten_ref
, head_referent
.buf
)) {
863 struct ref_update
*new_update
;
866 * First make sure that HEAD is not already in the
867 * transaction. This check is O(lg N) in the transaction
868 * size, but it happens at most once per transaction.
870 if (string_list_has_string(&affected_refnames
, "HEAD")) {
871 /* An entry already existed */
873 _("multiple updates for 'HEAD' (including one "
874 "via its referent '%s') are not allowed"),
876 ret
= TRANSACTION_NAME_CONFLICT
;
880 new_update
= ref_transaction_add_update(
882 u
->flags
| REF_LOG_ONLY
| REF_NO_DEREF
,
883 &u
->new_oid
, &u
->old_oid
, u
->msg
);
884 string_list_insert(&affected_refnames
, new_update
->refname
);
887 ret
= read_ref_without_reload(stack
, rewritten_ref
,
888 ¤t_oid
, &referent
, &u
->type
);
891 if (ret
> 0 && (!(u
->flags
& REF_HAVE_OLD
) || is_null_oid(&u
->old_oid
))) {
893 * The reference does not exist, and we either have no
894 * old object ID or expect the reference to not exist.
895 * We can thus skip below safety checks as well as the
896 * symref splitting. But we do want to verify that
897 * there is no conflicting reference here so that we
898 * can output a proper error message instead of failing
901 ret
= refs_verify_refname_available(ref_store
, u
->refname
,
902 &affected_refnames
, NULL
, err
);
907 * There is no need to write the reference deletion
908 * when the reference in question doesn't exist.
910 if (u
->flags
& REF_HAVE_NEW
&& !is_null_oid(&u
->new_oid
)) {
911 ret
= queue_transaction_update(refs
, tx_data
, u
,
920 /* The reference does not exist, but we expected it to. */
921 strbuf_addf(err
, _("cannot lock ref '%s': "
922 "unable to resolve reference '%s'"),
923 original_update_refname(u
), u
->refname
);
928 if (u
->type
& REF_ISSYMREF
) {
930 * The reftable stack is locked at this point already,
931 * so it is safe to call `refs_resolve_ref_unsafe()`
932 * here without causing races.
934 const char *resolved
= refs_resolve_ref_unsafe(&refs
->base
, u
->refname
, 0,
937 if (u
->flags
& REF_NO_DEREF
) {
938 if (u
->flags
& REF_HAVE_OLD
&& !resolved
) {
939 strbuf_addf(err
, _("cannot lock ref '%s': "
940 "error reading reference"), u
->refname
);
945 struct ref_update
*new_update
;
948 new_flags
= u
->flags
;
949 if (!strcmp(rewritten_ref
, "HEAD"))
950 new_flags
|= REF_UPDATE_VIA_HEAD
;
953 * If we are updating a symref (eg. HEAD), we should also
954 * update the branch that the symref points to.
956 * This is generic functionality, and would be better
957 * done in refs.c, but the current implementation is
958 * intertwined with the locking in files-backend.c.
960 new_update
= ref_transaction_add_update(
961 transaction
, referent
.buf
, new_flags
,
962 &u
->new_oid
, &u
->old_oid
, u
->msg
);
963 new_update
->parent_update
= u
;
966 * Change the symbolic ref update to log only. Also, it
967 * doesn't need to check its old OID value, as that will be
968 * done when new_update is processed.
970 u
->flags
|= REF_LOG_ONLY
| REF_NO_DEREF
;
971 u
->flags
&= ~REF_HAVE_OLD
;
973 if (string_list_has_string(&affected_refnames
, new_update
->refname
)) {
975 _("multiple updates for '%s' (including one "
976 "via symref '%s') are not allowed"),
977 referent
.buf
, u
->refname
);
978 ret
= TRANSACTION_NAME_CONFLICT
;
981 string_list_insert(&affected_refnames
, new_update
->refname
);
986 * Verify that the old object matches our expectations. Note
987 * that the error messages here do not make a lot of sense in
988 * the context of the reftable backend as we never lock
989 * individual refs. But the error messages match what the files
990 * backend returns, which keeps our tests happy.
992 if (u
->flags
& REF_HAVE_OLD
&& !oideq(¤t_oid
, &u
->old_oid
)) {
993 if (is_null_oid(&u
->old_oid
))
994 strbuf_addf(err
, _("cannot lock ref '%s': "
995 "reference already exists"),
996 original_update_refname(u
));
997 else if (is_null_oid(¤t_oid
))
998 strbuf_addf(err
, _("cannot lock ref '%s': "
999 "reference is missing but expected %s"),
1000 original_update_refname(u
),
1001 oid_to_hex(&u
->old_oid
));
1003 strbuf_addf(err
, _("cannot lock ref '%s': "
1004 "is at %s but expected %s"),
1005 original_update_refname(u
),
1006 oid_to_hex(¤t_oid
),
1007 oid_to_hex(&u
->old_oid
));
1013 * If all of the following conditions are true:
1015 * - We're not about to write a symref.
1016 * - We're not about to write a log-only entry.
1017 * - Old and new object ID are different.
1019 * Then we're essentially doing a no-op update that can be
1020 * skipped. This is not only for the sake of efficiency, but
1021 * also skips writing unneeded reflog entries.
1023 if ((u
->type
& REF_ISSYMREF
) ||
1024 (u
->flags
& REF_LOG_ONLY
) ||
1025 (u
->flags
& REF_HAVE_NEW
&& !oideq(¤t_oid
, &u
->new_oid
))) {
1026 ret
= queue_transaction_update(refs
, tx_data
, u
,
1033 transaction
->backend_data
= tx_data
;
1034 transaction
->state
= REF_TRANSACTION_PREPARED
;
1037 assert(ret
!= REFTABLE_API_ERROR
);
1039 free_transaction_data(tx_data
);
1040 transaction
->state
= REF_TRANSACTION_CLOSED
;
1042 strbuf_addf(err
, _("reftable: transaction prepare: %s"),
1043 reftable_error_str(ret
));
1045 string_list_clear(&affected_refnames
, 0);
1046 strbuf_release(&referent
);
1047 strbuf_release(&head_referent
);
1052 static int reftable_be_transaction_abort(struct ref_store
*ref_store
,
1053 struct ref_transaction
*transaction
,
1056 struct reftable_transaction_data
*tx_data
= transaction
->backend_data
;
1057 free_transaction_data(tx_data
);
1058 transaction
->state
= REF_TRANSACTION_CLOSED
;
1062 static int transaction_update_cmp(const void *a
, const void *b
)
1064 return strcmp(((struct reftable_transaction_update
*)a
)->update
->refname
,
1065 ((struct reftable_transaction_update
*)b
)->update
->refname
);
1068 static int write_transaction_table(struct reftable_writer
*writer
, void *cb_data
)
1070 struct write_transaction_table_arg
*arg
= cb_data
;
1071 struct reftable_merged_table
*mt
=
1072 reftable_stack_merged_table(arg
->stack
);
1073 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
1074 struct reftable_log_record
*logs
= NULL
;
1075 size_t logs_nr
= 0, logs_alloc
= 0, i
;
1078 QSORT(arg
->updates
, arg
->updates_nr
, transaction_update_cmp
);
1080 reftable_writer_set_limits(writer
, ts
, ts
);
1082 for (i
= 0; i
< arg
->updates_nr
; i
++) {
1083 struct reftable_transaction_update
*tx_update
= &arg
->updates
[i
];
1084 struct ref_update
*u
= tx_update
->update
;
1087 * Write a reflog entry when updating a ref to point to
1088 * something new in either of the following cases:
1090 * - The reference is about to be deleted. We always want to
1091 * delete the reflog in that case.
1092 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1094 * - `core.logAllRefUpdates` tells us to create the reflog for
1097 if (u
->flags
& REF_HAVE_NEW
&& !(u
->type
& REF_ISSYMREF
) && is_null_oid(&u
->new_oid
)) {
1098 struct reftable_log_record log
= {0};
1099 struct reftable_iterator it
= {0};
1102 * When deleting refs we also delete all reflog entries
1103 * with them. While it is not strictly required to
1104 * delete reflogs together with their refs, this
1105 * matches the behaviour of the files backend.
1107 * Unfortunately, we have no better way than to delete
1108 * all reflog entries one by one.
1110 ret
= reftable_merged_table_seek_log(mt
, &it
, u
->refname
);
1112 struct reftable_log_record
*tombstone
;
1114 ret
= reftable_iterator_next_log(&it
, &log
);
1117 if (ret
> 0 || strcmp(log
.refname
, u
->refname
)) {
1122 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1123 tombstone
= &logs
[logs_nr
++];
1124 tombstone
->refname
= xstrdup(u
->refname
);
1125 tombstone
->value_type
= REFTABLE_LOG_DELETION
;
1126 tombstone
->update_index
= log
.update_index
;
1129 reftable_log_record_release(&log
);
1130 reftable_iterator_destroy(&it
);
1134 } else if (u
->flags
& REF_HAVE_NEW
&&
1135 (u
->flags
& REF_FORCE_CREATE_REFLOG
||
1136 should_write_log(&arg
->refs
->base
, u
->refname
))) {
1137 struct reftable_log_record
*log
;
1139 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1140 log
= &logs
[logs_nr
++];
1141 memset(log
, 0, sizeof(*log
));
1143 fill_reftable_log_record(log
);
1144 log
->update_index
= ts
;
1145 log
->refname
= xstrdup(u
->refname
);
1146 log
->value
.update
.new_hash
= u
->new_oid
.hash
;
1147 log
->value
.update
.old_hash
= tx_update
->current_oid
.hash
;
1148 log
->value
.update
.message
=
1149 xstrndup(u
->msg
, arg
->refs
->write_options
.block_size
/ 2);
1152 if (u
->flags
& REF_LOG_ONLY
)
1155 if (u
->flags
& REF_HAVE_NEW
&& is_null_oid(&u
->new_oid
)) {
1156 struct reftable_ref_record ref
= {
1157 .refname
= (char *)u
->refname
,
1159 .value_type
= REFTABLE_REF_DELETION
,
1162 ret
= reftable_writer_add_ref(writer
, &ref
);
1165 } else if (u
->flags
& REF_HAVE_NEW
) {
1166 struct reftable_ref_record ref
= {0};
1167 struct object_id peeled
;
1170 ref
.refname
= (char *)u
->refname
;
1171 ref
.update_index
= ts
;
1173 peel_error
= peel_object(&u
->new_oid
, &peeled
);
1175 ref
.value_type
= REFTABLE_REF_VAL2
;
1176 memcpy(ref
.value
.val2
.target_value
, peeled
.hash
, GIT_MAX_RAWSZ
);
1177 memcpy(ref
.value
.val2
.value
, u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1178 } else if (!is_null_oid(&u
->new_oid
)) {
1179 ref
.value_type
= REFTABLE_REF_VAL1
;
1180 memcpy(ref
.value
.val1
, u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1183 ret
= reftable_writer_add_ref(writer
, &ref
);
1190 * Logs are written at the end so that we do not have intermixed ref
1194 ret
= reftable_writer_add_logs(writer
, logs
, logs_nr
);
1200 assert(ret
!= REFTABLE_API_ERROR
);
1201 for (i
= 0; i
< logs_nr
; i
++)
1202 clear_reftable_log_record(&logs
[i
]);
1207 static int reftable_be_transaction_finish(struct ref_store
*ref_store
,
1208 struct ref_transaction
*transaction
,
1211 struct reftable_transaction_data
*tx_data
= transaction
->backend_data
;
1214 for (size_t i
= 0; i
< tx_data
->args_nr
; i
++) {
1215 ret
= reftable_addition_add(tx_data
->args
[i
].addition
,
1216 write_transaction_table
, &tx_data
->args
[i
]);
1220 ret
= reftable_addition_commit(tx_data
->args
[i
].addition
);
1226 assert(ret
!= REFTABLE_API_ERROR
);
1227 free_transaction_data(tx_data
);
1228 transaction
->state
= REF_TRANSACTION_CLOSED
;
1231 strbuf_addf(err
, _("reftable: transaction failure: %s"),
1232 reftable_error_str(ret
));
1238 static int reftable_be_initial_transaction_commit(struct ref_store
*ref_store UNUSED
,
1239 struct ref_transaction
*transaction
,
1242 return ref_transaction_commit(transaction
, err
);
1245 static int reftable_be_pack_refs(struct ref_store
*ref_store
,
1246 struct pack_refs_opts
*opts
)
1248 struct reftable_ref_store
*refs
=
1249 reftable_be_downcast(ref_store
, REF_STORE_WRITE
| REF_STORE_ODB
, "pack_refs");
1250 struct reftable_stack
*stack
;
1256 stack
= refs
->worktree_stack
;
1258 stack
= refs
->main_stack
;
1260 ret
= reftable_stack_compact_all(stack
, NULL
);
1263 ret
= reftable_stack_clean(stack
);
1271 struct write_create_symref_arg
{
1272 struct reftable_ref_store
*refs
;
1273 struct reftable_stack
*stack
;
1274 const char *refname
;
1279 static int write_create_symref_table(struct reftable_writer
*writer
, void *cb_data
)
1281 struct write_create_symref_arg
*create
= cb_data
;
1282 uint64_t ts
= reftable_stack_next_update_index(create
->stack
);
1283 struct reftable_ref_record ref
= {
1284 .refname
= (char *)create
->refname
,
1285 .value_type
= REFTABLE_REF_SYMREF
,
1286 .value
.symref
= (char *)create
->target
,
1289 struct reftable_log_record log
= {0};
1290 struct object_id new_oid
;
1291 struct object_id old_oid
;
1294 reftable_writer_set_limits(writer
, ts
, ts
);
1296 ret
= reftable_writer_add_ref(writer
, &ref
);
1301 * Note that it is important to try and resolve the reference before we
1302 * write the log entry. This is because `should_write_log()` will munge
1303 * `core.logAllRefUpdates`, which is undesirable when we create a new
1304 * repository because it would be written into the config. As HEAD will
1305 * not resolve for new repositories this ordering will ensure that this
1308 if (!create
->logmsg
||
1309 !refs_resolve_ref_unsafe(&create
->refs
->base
, create
->target
,
1310 RESOLVE_REF_READING
, &new_oid
, NULL
) ||
1311 !should_write_log(&create
->refs
->base
, create
->refname
))
1314 fill_reftable_log_record(&log
);
1315 log
.refname
= xstrdup(create
->refname
);
1316 log
.update_index
= ts
;
1317 log
.value
.update
.message
= xstrndup(create
->logmsg
,
1318 create
->refs
->write_options
.block_size
/ 2);
1319 log
.value
.update
.new_hash
= new_oid
.hash
;
1320 if (refs_resolve_ref_unsafe(&create
->refs
->base
, create
->refname
,
1321 RESOLVE_REF_READING
, &old_oid
, NULL
))
1322 log
.value
.update
.old_hash
= old_oid
.hash
;
1324 ret
= reftable_writer_add_log(writer
, &log
);
1325 clear_reftable_log_record(&log
);
1329 static int reftable_be_create_symref(struct ref_store
*ref_store
,
1330 const char *refname
,
1334 struct reftable_ref_store
*refs
=
1335 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "create_symref");
1336 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1337 struct write_create_symref_arg arg
= {
1350 ret
= reftable_stack_reload(stack
);
1354 ret
= reftable_stack_add(stack
, &write_create_symref_table
, &arg
);
1357 assert(ret
!= REFTABLE_API_ERROR
);
1359 error("unable to write symref for %s: %s", refname
,
1360 reftable_error_str(ret
));
1364 struct write_copy_arg
{
1365 struct reftable_ref_store
*refs
;
1366 struct reftable_stack
*stack
;
1367 const char *oldname
;
1368 const char *newname
;
1373 static int write_copy_table(struct reftable_writer
*writer
, void *cb_data
)
1375 struct write_copy_arg
*arg
= cb_data
;
1376 uint64_t deletion_ts
, creation_ts
;
1377 struct reftable_merged_table
*mt
= reftable_stack_merged_table(arg
->stack
);
1378 struct reftable_ref_record old_ref
= {0}, refs
[2] = {0};
1379 struct reftable_log_record old_log
= {0}, *logs
= NULL
;
1380 struct reftable_iterator it
= {0};
1381 struct string_list skip
= STRING_LIST_INIT_NODUP
;
1382 struct strbuf errbuf
= STRBUF_INIT
;
1383 size_t logs_nr
= 0, logs_alloc
= 0, i
;
1386 if (reftable_stack_read_ref(arg
->stack
, arg
->oldname
, &old_ref
)) {
1387 ret
= error(_("refname %s not found"), arg
->oldname
);
1390 if (old_ref
.value_type
== REFTABLE_REF_SYMREF
) {
1391 ret
= error(_("refname %s is a symbolic ref, copying it is not supported"),
1397 * There's nothing to do in case the old and new name are the same, so
1398 * we exit early in that case.
1400 if (!strcmp(arg
->oldname
, arg
->newname
)) {
1406 * Verify that the new refname is available.
1408 string_list_insert(&skip
, arg
->oldname
);
1409 ret
= refs_verify_refname_available(&arg
->refs
->base
, arg
->newname
,
1410 NULL
, &skip
, &errbuf
);
1412 error("%s", errbuf
.buf
);
1417 * When deleting the old reference we have to use two update indices:
1418 * once to delete the old ref and its reflog, and once to create the
1419 * new ref and its reflog. They need to be staged with two separate
1420 * indices because the new reflog needs to encode both the deletion of
1421 * the old branch and the creation of the new branch, and we cannot do
1422 * two changes to a reflog in a single update.
1424 deletion_ts
= creation_ts
= reftable_stack_next_update_index(arg
->stack
);
1425 if (arg
->delete_old
)
1427 reftable_writer_set_limits(writer
, deletion_ts
, creation_ts
);
1430 * Add the new reference. If this is a rename then we also delete the
1434 refs
[0].refname
= (char *)arg
->newname
;
1435 refs
[0].update_index
= creation_ts
;
1436 if (arg
->delete_old
) {
1437 refs
[1].refname
= (char *)arg
->oldname
;
1438 refs
[1].value_type
= REFTABLE_REF_DELETION
;
1439 refs
[1].update_index
= deletion_ts
;
1441 ret
= reftable_writer_add_refs(writer
, refs
, arg
->delete_old
? 2 : 1);
1446 * When deleting the old branch we need to create a reflog entry on the
1447 * new branch name that indicates that the old branch has been deleted
1448 * and then recreated. This is a tad weird, but matches what the files
1451 if (arg
->delete_old
) {
1452 struct strbuf head_referent
= STRBUF_INIT
;
1453 struct object_id head_oid
;
1454 int append_head_reflog
;
1455 unsigned head_type
= 0;
1457 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1458 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1459 fill_reftable_log_record(&logs
[logs_nr
]);
1460 logs
[logs_nr
].refname
= (char *)arg
->newname
;
1461 logs
[logs_nr
].update_index
= deletion_ts
;
1462 logs
[logs_nr
].value
.update
.message
=
1463 xstrndup(arg
->logmsg
, arg
->refs
->write_options
.block_size
/ 2);
1464 logs
[logs_nr
].value
.update
.old_hash
= old_ref
.value
.val1
;
1467 ret
= read_ref_without_reload(arg
->stack
, "HEAD", &head_oid
, &head_referent
, &head_type
);
1470 append_head_reflog
= (head_type
& REF_ISSYMREF
) && !strcmp(head_referent
.buf
, arg
->oldname
);
1471 strbuf_release(&head_referent
);
1474 * The files backend uses `refs_delete_ref()` to delete the old
1475 * branch name, which will append a reflog entry for HEAD in
1476 * case it points to the old branch.
1478 if (append_head_reflog
) {
1479 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1480 logs
[logs_nr
] = logs
[logs_nr
- 1];
1481 logs
[logs_nr
].refname
= "HEAD";
1487 * Create the reflog entry for the newly created branch.
1489 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1490 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1491 fill_reftable_log_record(&logs
[logs_nr
]);
1492 logs
[logs_nr
].refname
= (char *)arg
->newname
;
1493 logs
[logs_nr
].update_index
= creation_ts
;
1494 logs
[logs_nr
].value
.update
.message
=
1495 xstrndup(arg
->logmsg
, arg
->refs
->write_options
.block_size
/ 2);
1496 logs
[logs_nr
].value
.update
.new_hash
= old_ref
.value
.val1
;
1500 * In addition to writing the reflog entry for the new branch, we also
1501 * copy over all log entries from the old reflog. Last but not least,
1502 * when renaming we also have to delete all the old reflog entries.
1504 ret
= reftable_merged_table_seek_log(mt
, &it
, arg
->oldname
);
1509 ret
= reftable_iterator_next_log(&it
, &old_log
);
1512 if (ret
> 0 || strcmp(old_log
.refname
, arg
->oldname
)) {
1517 free(old_log
.refname
);
1520 * Copy over the old reflog entry with the new refname.
1522 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1523 logs
[logs_nr
] = old_log
;
1524 logs
[logs_nr
].refname
= (char *)arg
->newname
;
1528 * Delete the old reflog entry in case we are renaming.
1530 if (arg
->delete_old
) {
1531 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1532 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1533 logs
[logs_nr
].refname
= (char *)arg
->oldname
;
1534 logs
[logs_nr
].value_type
= REFTABLE_LOG_DELETION
;
1535 logs
[logs_nr
].update_index
= old_log
.update_index
;
1540 * Transfer ownership of the log record we're iterating over to
1541 * the array of log records. Otherwise, the pointers would get
1542 * free'd or reallocated by the iterator.
1544 memset(&old_log
, 0, sizeof(old_log
));
1547 ret
= reftable_writer_add_logs(writer
, logs
, logs_nr
);
1552 assert(ret
!= REFTABLE_API_ERROR
);
1553 reftable_iterator_destroy(&it
);
1554 string_list_clear(&skip
, 0);
1555 strbuf_release(&errbuf
);
1556 for (i
= 0; i
< logs_nr
; i
++) {
1557 if (!strcmp(logs
[i
].refname
, "HEAD"))
1559 if (logs
[i
].value
.update
.old_hash
== old_ref
.value
.val1
)
1560 logs
[i
].value
.update
.old_hash
= NULL
;
1561 if (logs
[i
].value
.update
.new_hash
== old_ref
.value
.val1
)
1562 logs
[i
].value
.update
.new_hash
= NULL
;
1563 logs
[i
].refname
= NULL
;
1564 reftable_log_record_release(&logs
[i
]);
1567 reftable_ref_record_release(&old_ref
);
1568 reftable_log_record_release(&old_log
);
1572 static int reftable_be_rename_ref(struct ref_store
*ref_store
,
1573 const char *oldrefname
,
1574 const char *newrefname
,
1577 struct reftable_ref_store
*refs
=
1578 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "rename_ref");
1579 struct reftable_stack
*stack
= stack_for(refs
, newrefname
, &newrefname
);
1580 struct write_copy_arg arg
= {
1583 .oldname
= oldrefname
,
1584 .newname
= newrefname
,
1594 ret
= reftable_stack_reload(stack
);
1597 ret
= reftable_stack_add(stack
, &write_copy_table
, &arg
);
1600 assert(ret
!= REFTABLE_API_ERROR
);
1604 static int reftable_be_copy_ref(struct ref_store
*ref_store
,
1605 const char *oldrefname
,
1606 const char *newrefname
,
1609 struct reftable_ref_store
*refs
=
1610 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "copy_ref");
1611 struct reftable_stack
*stack
= stack_for(refs
, newrefname
, &newrefname
);
1612 struct write_copy_arg arg
= {
1615 .oldname
= oldrefname
,
1616 .newname
= newrefname
,
1625 ret
= reftable_stack_reload(stack
);
1628 ret
= reftable_stack_add(stack
, &write_copy_table
, &arg
);
1631 assert(ret
!= REFTABLE_API_ERROR
);
1635 struct reftable_reflog_iterator
{
1636 struct ref_iterator base
;
1637 struct reftable_ref_store
*refs
;
1638 struct reftable_iterator iter
;
1639 struct reftable_log_record log
;
1640 struct object_id oid
;
1645 static int reftable_reflog_iterator_advance(struct ref_iterator
*ref_iterator
)
1647 struct reftable_reflog_iterator
*iter
=
1648 (struct reftable_reflog_iterator
*)ref_iterator
;
1650 while (!iter
->err
) {
1653 iter
->err
= reftable_iterator_next_log(&iter
->iter
, &iter
->log
);
1658 * We want the refnames that we have reflogs for, so we skip if
1659 * we've already produced this name. This could be faster by
1660 * seeking directly to reflog@update_index==0.
1662 if (iter
->last_name
&& !strcmp(iter
->log
.refname
, iter
->last_name
))
1665 if (!refs_resolve_ref_unsafe(&iter
->refs
->base
, iter
->log
.refname
,
1666 0, &iter
->oid
, &flags
)) {
1667 error(_("bad ref for %s"), iter
->log
.refname
);
1671 free(iter
->last_name
);
1672 iter
->last_name
= xstrdup(iter
->log
.refname
);
1673 iter
->base
.refname
= iter
->log
.refname
;
1674 iter
->base
.oid
= &iter
->oid
;
1675 iter
->base
.flags
= flags
;
1680 if (iter
->err
> 0) {
1681 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
1686 if (iter
->err
< 0) {
1687 ref_iterator_abort(ref_iterator
);
1694 static int reftable_reflog_iterator_peel(struct ref_iterator
*ref_iterator
,
1695 struct object_id
*peeled
)
1697 BUG("reftable reflog iterator cannot be peeled");
1701 static int reftable_reflog_iterator_abort(struct ref_iterator
*ref_iterator
)
1703 struct reftable_reflog_iterator
*iter
=
1704 (struct reftable_reflog_iterator
*)ref_iterator
;
1705 reftable_log_record_release(&iter
->log
);
1706 reftable_iterator_destroy(&iter
->iter
);
1707 free(iter
->last_name
);
1712 static struct ref_iterator_vtable reftable_reflog_iterator_vtable
= {
1713 .advance
= reftable_reflog_iterator_advance
,
1714 .peel
= reftable_reflog_iterator_peel
,
1715 .abort
= reftable_reflog_iterator_abort
1718 static struct reftable_reflog_iterator
*reflog_iterator_for_stack(struct reftable_ref_store
*refs
,
1719 struct reftable_stack
*stack
)
1721 struct reftable_merged_table
*merged_table
;
1722 struct reftable_reflog_iterator
*iter
;
1725 iter
= xcalloc(1, sizeof(*iter
));
1726 base_ref_iterator_init(&iter
->base
, &reftable_reflog_iterator_vtable
, 1);
1728 iter
->base
.oid
= &iter
->oid
;
1734 ret
= reftable_stack_reload(refs
->main_stack
);
1738 merged_table
= reftable_stack_merged_table(stack
);
1740 ret
= reftable_merged_table_seek_log(merged_table
, &iter
->iter
, "");
1749 static struct ref_iterator
*reftable_be_reflog_iterator_begin(struct ref_store
*ref_store
)
1751 struct reftable_ref_store
*refs
=
1752 reftable_be_downcast(ref_store
, REF_STORE_READ
, "reflog_iterator_begin");
1753 struct reftable_reflog_iterator
*main_iter
, *worktree_iter
;
1755 main_iter
= reflog_iterator_for_stack(refs
, refs
->main_stack
);
1756 if (!refs
->worktree_stack
)
1757 return &main_iter
->base
;
1759 worktree_iter
= reflog_iterator_for_stack(refs
, refs
->worktree_stack
);
1761 return merge_ref_iterator_begin(1, &worktree_iter
->base
, &main_iter
->base
,
1762 iterator_select
, NULL
);
1765 static int yield_log_record(struct reftable_log_record
*log
,
1766 each_reflog_ent_fn fn
,
1769 struct object_id old_oid
, new_oid
;
1770 const char *full_committer
;
1772 oidread(&old_oid
, log
->value
.update
.old_hash
);
1773 oidread(&new_oid
, log
->value
.update
.new_hash
);
1776 * When both the old object ID and the new object ID are null
1777 * then this is the reflog existence marker. The caller must
1778 * not be aware of it.
1780 if (is_null_oid(&old_oid
) && is_null_oid(&new_oid
))
1783 full_committer
= fmt_ident(log
->value
.update
.name
, log
->value
.update
.email
,
1784 WANT_COMMITTER_IDENT
, NULL
, IDENT_NO_DATE
);
1785 return fn(&old_oid
, &new_oid
, full_committer
,
1786 log
->value
.update
.time
, log
->value
.update
.tz_offset
,
1787 log
->value
.update
.message
, cb_data
);
1790 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store
*ref_store
,
1791 const char *refname
,
1792 each_reflog_ent_fn fn
,
1795 struct reftable_ref_store
*refs
=
1796 reftable_be_downcast(ref_store
, REF_STORE_READ
, "for_each_reflog_ent_reverse");
1797 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1798 struct reftable_merged_table
*mt
= NULL
;
1799 struct reftable_log_record log
= {0};
1800 struct reftable_iterator it
= {0};
1806 mt
= reftable_stack_merged_table(stack
);
1807 ret
= reftable_merged_table_seek_log(mt
, &it
, refname
);
1809 ret
= reftable_iterator_next_log(&it
, &log
);
1812 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
1817 ret
= yield_log_record(&log
, fn
, cb_data
);
1822 reftable_log_record_release(&log
);
1823 reftable_iterator_destroy(&it
);
1827 static int reftable_be_for_each_reflog_ent(struct ref_store
*ref_store
,
1828 const char *refname
,
1829 each_reflog_ent_fn fn
,
1832 struct reftable_ref_store
*refs
=
1833 reftable_be_downcast(ref_store
, REF_STORE_READ
, "for_each_reflog_ent");
1834 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1835 struct reftable_merged_table
*mt
= NULL
;
1836 struct reftable_log_record
*logs
= NULL
;
1837 struct reftable_iterator it
= {0};
1838 size_t logs_alloc
= 0, logs_nr
= 0, i
;
1844 mt
= reftable_stack_merged_table(stack
);
1845 ret
= reftable_merged_table_seek_log(mt
, &it
, refname
);
1847 struct reftable_log_record log
= {0};
1849 ret
= reftable_iterator_next_log(&it
, &log
);
1852 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
1853 reftable_log_record_release(&log
);
1858 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1859 logs
[logs_nr
++] = log
;
1862 for (i
= logs_nr
; i
--;) {
1863 ret
= yield_log_record(&logs
[i
], fn
, cb_data
);
1869 reftable_iterator_destroy(&it
);
1870 for (i
= 0; i
< logs_nr
; i
++)
1871 reftable_log_record_release(&logs
[i
]);
1876 static int reftable_be_reflog_exists(struct ref_store
*ref_store
,
1877 const char *refname
)
1879 struct reftable_ref_store
*refs
=
1880 reftable_be_downcast(ref_store
, REF_STORE_READ
, "reflog_exists");
1881 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1882 struct reftable_merged_table
*mt
= reftable_stack_merged_table(stack
);
1883 struct reftable_log_record log
= {0};
1884 struct reftable_iterator it
= {0};
1891 ret
= reftable_stack_reload(stack
);
1895 ret
= reftable_merged_table_seek_log(mt
, &it
, refname
);
1900 * Check whether we get at least one log record for the given ref name.
1901 * If so, the reflog exists, otherwise it doesn't.
1903 ret
= reftable_iterator_next_log(&it
, &log
);
1911 ret
= strcmp(log
.refname
, refname
) == 0;
1914 reftable_iterator_destroy(&it
);
1915 reftable_log_record_release(&log
);
1921 struct write_reflog_existence_arg
{
1922 struct reftable_ref_store
*refs
;
1923 const char *refname
;
1924 struct reftable_stack
*stack
;
1927 static int write_reflog_existence_table(struct reftable_writer
*writer
,
1930 struct write_reflog_existence_arg
*arg
= cb_data
;
1931 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
1932 struct reftable_log_record log
= {0};
1935 ret
= reftable_stack_read_log(arg
->stack
, arg
->refname
, &log
);
1939 reftable_writer_set_limits(writer
, ts
, ts
);
1942 * The existence entry has both old and new object ID set to the the
1943 * null object ID. Our iterators are aware of this and will not present
1944 * them to their callers.
1946 log
.refname
= xstrdup(arg
->refname
);
1947 log
.update_index
= ts
;
1948 log
.value_type
= REFTABLE_LOG_UPDATE
;
1949 ret
= reftable_writer_add_log(writer
, &log
);
1952 assert(ret
!= REFTABLE_API_ERROR
);
1953 reftable_log_record_release(&log
);
1957 static int reftable_be_create_reflog(struct ref_store
*ref_store
,
1958 const char *refname
,
1959 struct strbuf
*errmsg
)
1961 struct reftable_ref_store
*refs
=
1962 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "create_reflog");
1963 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1964 struct write_reflog_existence_arg arg
= {
1975 ret
= reftable_stack_reload(stack
);
1979 ret
= reftable_stack_add(stack
, &write_reflog_existence_table
, &arg
);
1985 struct write_reflog_delete_arg
{
1986 struct reftable_stack
*stack
;
1987 const char *refname
;
1990 static int write_reflog_delete_table(struct reftable_writer
*writer
, void *cb_data
)
1992 struct write_reflog_delete_arg
*arg
= cb_data
;
1993 struct reftable_merged_table
*mt
=
1994 reftable_stack_merged_table(arg
->stack
);
1995 struct reftable_log_record log
= {0}, tombstone
= {0};
1996 struct reftable_iterator it
= {0};
1997 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
2000 reftable_writer_set_limits(writer
, ts
, ts
);
2003 * In order to delete a table we need to delete all reflog entries one
2004 * by one. This is inefficient, but the reftable format does not have a
2005 * better marker right now.
2007 ret
= reftable_merged_table_seek_log(mt
, &it
, arg
->refname
);
2009 ret
= reftable_iterator_next_log(&it
, &log
);
2012 if (ret
> 0 || strcmp(log
.refname
, arg
->refname
)) {
2017 tombstone
.refname
= (char *)arg
->refname
;
2018 tombstone
.value_type
= REFTABLE_LOG_DELETION
;
2019 tombstone
.update_index
= log
.update_index
;
2021 ret
= reftable_writer_add_log(writer
, &tombstone
);
2024 reftable_log_record_release(&log
);
2025 reftable_iterator_destroy(&it
);
2029 static int reftable_be_delete_reflog(struct ref_store
*ref_store
,
2030 const char *refname
)
2032 struct reftable_ref_store
*refs
=
2033 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "delete_reflog");
2034 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
2035 struct write_reflog_delete_arg arg
= {
2041 ret
= reftable_stack_reload(stack
);
2044 ret
= reftable_stack_add(stack
, &write_reflog_delete_table
, &arg
);
2046 assert(ret
!= REFTABLE_API_ERROR
);
2050 struct reflog_expiry_arg
{
2051 struct reftable_stack
*stack
;
2052 struct reftable_log_record
*records
;
2053 struct object_id update_oid
;
2054 const char *refname
;
2058 static int write_reflog_expiry_table(struct reftable_writer
*writer
, void *cb_data
)
2060 struct reflog_expiry_arg
*arg
= cb_data
;
2061 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
2062 uint64_t live_records
= 0;
2066 for (i
= 0; i
< arg
->len
; i
++)
2067 if (arg
->records
[i
].value_type
== REFTABLE_LOG_UPDATE
)
2070 reftable_writer_set_limits(writer
, ts
, ts
);
2072 if (!is_null_oid(&arg
->update_oid
)) {
2073 struct reftable_ref_record ref
= {0};
2074 struct object_id peeled
;
2076 ref
.refname
= (char *)arg
->refname
;
2077 ref
.update_index
= ts
;
2079 if (!peel_object(&arg
->update_oid
, &peeled
)) {
2080 ref
.value_type
= REFTABLE_REF_VAL2
;
2081 memcpy(ref
.value
.val2
.target_value
, peeled
.hash
, GIT_MAX_RAWSZ
);
2082 memcpy(ref
.value
.val2
.value
, arg
->update_oid
.hash
, GIT_MAX_RAWSZ
);
2084 ref
.value_type
= REFTABLE_REF_VAL1
;
2085 memcpy(ref
.value
.val1
, arg
->update_oid
.hash
, GIT_MAX_RAWSZ
);
2088 ret
= reftable_writer_add_ref(writer
, &ref
);
2094 * When there are no more entries left in the reflog we empty it
2095 * completely, but write a placeholder reflog entry that indicates that
2096 * the reflog still exists.
2098 if (!live_records
) {
2099 struct reftable_log_record log
= {
2100 .refname
= (char *)arg
->refname
,
2101 .value_type
= REFTABLE_LOG_UPDATE
,
2105 ret
= reftable_writer_add_log(writer
, &log
);
2110 for (i
= 0; i
< arg
->len
; i
++) {
2111 ret
= reftable_writer_add_log(writer
, &arg
->records
[i
]);
2119 static int reftable_be_reflog_expire(struct ref_store
*ref_store
,
2120 const char *refname
,
2122 reflog_expiry_prepare_fn prepare_fn
,
2123 reflog_expiry_should_prune_fn should_prune_fn
,
2124 reflog_expiry_cleanup_fn cleanup_fn
,
2125 void *policy_cb_data
)
2128 * For log expiry, we write tombstones for every single reflog entry
2129 * that is to be expired. This means that the entries are still
2130 * retrievable by delving into the stack, and expiring entries
2131 * paradoxically takes extra memory. This memory is only reclaimed when
2132 * compacting the reftable stack.
2134 * It would be better if the refs backend supported an API that sets a
2135 * criterion for all refs, passing the criterion to pack_refs().
2137 * On the plus side, because we do the expiration per ref, we can easily
2138 * insert the reflog existence dummies.
2140 struct reftable_ref_store
*refs
=
2141 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "reflog_expire");
2142 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
2143 struct reftable_merged_table
*mt
= reftable_stack_merged_table(stack
);
2144 struct reftable_log_record
*logs
= NULL
;
2145 struct reftable_log_record
*rewritten
= NULL
;
2146 struct reftable_ref_record ref_record
= {0};
2147 struct reftable_iterator it
= {0};
2148 struct reftable_addition
*add
= NULL
;
2149 struct reflog_expiry_arg arg
= {0};
2150 struct object_id oid
= {0};
2151 uint8_t *last_hash
= NULL
;
2152 size_t logs_nr
= 0, logs_alloc
= 0, i
;
2158 ret
= reftable_stack_reload(stack
);
2162 ret
= reftable_merged_table_seek_log(mt
, &it
, refname
);
2166 ret
= reftable_stack_new_addition(&add
, stack
);
2170 ret
= reftable_stack_read_ref(stack
, refname
, &ref_record
);
2173 if (reftable_ref_record_val1(&ref_record
))
2174 oidread(&oid
, reftable_ref_record_val1(&ref_record
));
2175 prepare_fn(refname
, &oid
, policy_cb_data
);
2178 struct reftable_log_record log
= {0};
2179 struct object_id old_oid
, new_oid
;
2181 ret
= reftable_iterator_next_log(&it
, &log
);
2184 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
2185 reftable_log_record_release(&log
);
2189 oidread(&old_oid
, log
.value
.update
.old_hash
);
2190 oidread(&new_oid
, log
.value
.update
.new_hash
);
2193 * Skip over the reflog existence marker. We will add it back
2194 * in when there are no live reflog records.
2196 if (is_null_oid(&old_oid
) && is_null_oid(&new_oid
)) {
2197 reftable_log_record_release(&log
);
2201 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
2202 logs
[logs_nr
++] = log
;
2206 * We need to rewrite all reflog entries according to the pruning
2207 * callback function:
2209 * - If a reflog entry shall be pruned we mark the record for
2212 * - Otherwise we may have to rewrite the chain of reflog entries so
2213 * that gaps created by just-deleted records get backfilled.
2215 CALLOC_ARRAY(rewritten
, logs_nr
);
2216 for (i
= logs_nr
; i
--;) {
2217 struct reftable_log_record
*dest
= &rewritten
[i
];
2218 struct object_id old_oid
, new_oid
;
2221 oidread(&old_oid
, logs
[i
].value
.update
.old_hash
);
2222 oidread(&new_oid
, logs
[i
].value
.update
.new_hash
);
2224 if (should_prune_fn(&old_oid
, &new_oid
, logs
[i
].value
.update
.email
,
2225 (timestamp_t
)logs
[i
].value
.update
.time
,
2226 logs
[i
].value
.update
.tz_offset
,
2227 logs
[i
].value
.update
.message
,
2229 dest
->value_type
= REFTABLE_LOG_DELETION
;
2231 if ((flags
& EXPIRE_REFLOGS_REWRITE
) && last_hash
)
2232 dest
->value
.update
.old_hash
= last_hash
;
2233 last_hash
= logs
[i
].value
.update
.new_hash
;
2237 if (flags
& EXPIRE_REFLOGS_UPDATE_REF
&& last_hash
&&
2238 reftable_ref_record_val1(&ref_record
))
2239 oidread(&arg
.update_oid
, last_hash
);
2241 arg
.records
= rewritten
;
2244 arg
.refname
= refname
,
2246 ret
= reftable_addition_add(add
, &write_reflog_expiry_table
, &arg
);
2251 * Future improvement: we could skip writing records that were
2254 if (!(flags
& EXPIRE_REFLOGS_DRY_RUN
))
2255 ret
= reftable_addition_commit(add
);
2259 cleanup_fn(policy_cb_data
);
2260 assert(ret
!= REFTABLE_API_ERROR
);
2262 reftable_ref_record_release(&ref_record
);
2263 reftable_iterator_destroy(&it
);
2264 reftable_addition_destroy(add
);
2265 for (i
= 0; i
< logs_nr
; i
++)
2266 reftable_log_record_release(&logs
[i
]);
2272 struct ref_storage_be refs_be_reftable
= {
2274 .init
= reftable_be_init
,
2275 .init_db
= reftable_be_init_db
,
2276 .transaction_prepare
= reftable_be_transaction_prepare
,
2277 .transaction_finish
= reftable_be_transaction_finish
,
2278 .transaction_abort
= reftable_be_transaction_abort
,
2279 .initial_transaction_commit
= reftable_be_initial_transaction_commit
,
2281 .pack_refs
= reftable_be_pack_refs
,
2282 .create_symref
= reftable_be_create_symref
,
2283 .rename_ref
= reftable_be_rename_ref
,
2284 .copy_ref
= reftable_be_copy_ref
,
2286 .iterator_begin
= reftable_be_iterator_begin
,
2287 .read_raw_ref
= reftable_be_read_raw_ref
,
2288 .read_symbolic_ref
= reftable_be_read_symbolic_ref
,
2290 .reflog_iterator_begin
= reftable_be_reflog_iterator_begin
,
2291 .for_each_reflog_ent
= reftable_be_for_each_reflog_ent
,
2292 .for_each_reflog_ent_reverse
= reftable_be_for_each_reflog_ent_reverse
,
2293 .reflog_exists
= reftable_be_reflog_exists
,
2294 .create_reflog
= reftable_be_create_reflog
,
2295 .delete_reflog
= reftable_be_delete_reflog
,
2296 .reflog_expire
= reftable_be_reflog_expire
,