#include "../git-compat-util.h"
#include "../abspath.h"
#include "../chdir-notify.h"
#include "../environment.h"
#include "../gettext.h"
#include "../iterator.h"
#include "../lockfile.h"
#include "../object.h"
#include "../reftable/reftable-stack.h"
#include "../reftable/reftable-record.h"
#include "../reftable/reftable-error.h"
#include "../reftable/reftable-iterator.h"
#include "../reftable/reftable-merged.h"
#include "../strmap.h"
#include "refs-internal.h"
/*
 * Used as a flag in ref_update::flags when the ref_update was via an
 * update to HEAD.
 */
#define REF_UPDATE_VIA_HEAD (1 << 8)
struct reftable_ref_store {
	struct ref_store base;

	/*
	 * The main stack refers to the common dir and thus contains common
	 * refs as well as refs of the main repository.
	 */
	struct reftable_stack *main_stack;
	/*
	 * The worktree stack refers to the gitdir in case the refdb is opened
	 * via a worktree. It thus contains the per-worktree refs.
	 */
	struct reftable_stack *worktree_stack;
	/*
	 * Map of worktree stacks by their respective worktree names. The map
	 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
	 */
	struct strmap worktree_stacks;
	struct reftable_write_options write_options;

	unsigned int store_flags;
	int err;
};
/*
 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
 * reftable_ref_store. required_flags is compared with ref_store's store_flags
 * to ensure the ref_store has all required capabilities. "caller" is used in
 * any necessary error messages.
 */
static struct reftable_ref_store *reftable_be_downcast(struct ref_store *ref_store,
						       unsigned int required_flags,
						       const char *caller)
{
	struct reftable_ref_store *refs;

	if (ref_store->be != &refs_be_reftable)
		BUG("ref_store is type \"%s\" not \"reftables\" in %s",
		    ref_store->be->name, caller);

	refs = (struct reftable_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		BUG("operation %s requires abilities 0x%x, but only have 0x%x",
		    caller, required_flags, refs->store_flags);

	return refs;
}
/*
 * Some refs are global to the repository (refs/heads/{*}), while others are
 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
 * multiple separate databases (ie. multiple reftable/ directories), one for
 * the shared refs, one for the current worktree refs, and one for each
 * additional worktree. For reading, we merge the view of both the shared and
 * the current worktree's refs, when necessary.
 *
 * This function also optionally assigns the rewritten reference name that is
 * local to the stack. This translation is required when using worktree refs
 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
 * those references in their normalized form.
 */
static struct reftable_stack *stack_for(struct reftable_ref_store *store,
					const char *refname,
					const char **rewritten_ref)
{
	const char *wtname;
	int wtname_len;

	if (!refname)
		return store->main_stack;

	switch (parse_worktree_ref(refname, &wtname, &wtname_len, rewritten_ref)) {
	case REF_WORKTREE_OTHER: {
		static struct strbuf wtname_buf = STRBUF_INIT;
		struct strbuf wt_dir = STRBUF_INIT;
		struct reftable_stack *stack;

		/*
		 * We're using a static buffer here so that we don't need to
		 * allocate the worktree name whenever we look up a reference.
		 * This could be avoided if the strmap interface knew how to
		 * handle keys with a length.
		 */
		strbuf_reset(&wtname_buf);
		strbuf_add(&wtname_buf, wtname, wtname_len);

		/*
		 * There is an edge case here: when the worktree references the
		 * current worktree, then we set up the stack once via
		 * `worktree_stacks` and once via `worktree_stack`. This is
		 * wasteful, but in the reading case it shouldn't matter. And
		 * in the writing case we would notice that the stack is locked
		 * already and error out when trying to write a reference via
		 * both stacks.
		 */
		stack = strmap_get(&store->worktree_stacks, wtname_buf.buf);
		if (!stack) {
			strbuf_addf(&wt_dir, "%s/worktrees/%s/reftable",
				    store->base.repo->commondir, wtname_buf.buf);

			store->err = reftable_new_stack(&stack, wt_dir.buf,
							store->write_options);
			assert(store->err != REFTABLE_API_ERROR);
			strmap_put(&store->worktree_stacks, wtname_buf.buf, stack);
		}

		strbuf_release(&wt_dir);
		return stack;
	}
	case REF_WORKTREE_CURRENT:
		/*
		 * If there is no worktree stack then we're currently in the
		 * main worktree. We thus return the main stack in that case.
		 */
		if (!store->worktree_stack)
			return store->main_stack;
		return store->worktree_stack;
	case REF_WORKTREE_MAIN:
	case REF_WORKTREE_SHARED:
		return store->main_stack;
	default:
		BUG("unhandled worktree reference type");
	}
}
static int should_write_log(struct ref_store *refs, const char *refname)
{
	if (log_all_ref_updates == LOG_REFS_UNSET)
		log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;

	switch (log_all_ref_updates) {
	case LOG_REFS_NONE:
		return refs_reflog_exists(refs, refname);
	case LOG_REFS_ALWAYS:
		return 1;
	case LOG_REFS_NORMAL:
		if (should_autocreate_reflog(refname))
			return 1;
		return refs_reflog_exists(refs, refname);
	default:
		BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates);
	}
}
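
/*
 * Example: with `core.logAllRefUpdates` unset in a non-bare repository
 * this resolves to LOG_REFS_NORMAL, so a branch such as
 * "refs/heads/topic" gets a reflog via `should_autocreate_reflog()`,
 * whereas a custom ref like "refs/foo/bar" only gets one if its reflog
 * already exists.
 */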
static void clear_reftable_log_record(struct reftable_log_record *log)
{
	switch (log->value_type) {
	case REFTABLE_LOG_UPDATE:
		/*
		 * When we write log records, the hashes are owned by the
		 * caller and thus shouldn't be free'd.
		 */
		log->value.update.old_hash = NULL;
		log->value.update.new_hash = NULL;
		break;
	case REFTABLE_LOG_DELETION:
		break;
	}

	reftable_log_record_release(log);
}
static void fill_reftable_log_record(struct reftable_log_record *log)
{
	const char *info = git_committer_info(0);
	struct ident_split split = {0};
	int sign = 1;

	if (split_ident_line(&split, info, strlen(info)))
		BUG("failed splitting committer info");

	reftable_log_record_release(log);
	log->value_type = REFTABLE_LOG_UPDATE;
	log->value.update.name =
		xstrndup(split.name_begin, split.name_end - split.name_begin);
	log->value.update.email =
		xstrndup(split.mail_begin, split.mail_end - split.mail_begin);
	log->value.update.time = atol(split.date_begin);
	if (*split.tz_begin == '-') {
		sign = -1;
		split.tz_begin++;
	}
	if (*split.tz_begin == '+') {
		sign = 1;
		split.tz_begin++;
	}

	log->value.update.tz_offset = sign * atoi(split.tz_begin);
}
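
/*
 * For illustration: a (made-up) committer line of
 * "Jane Doe <jane@example.com> 1712000000 -0700" is split into name
 * "Jane Doe", email "jane@example.com", time 1712000000 and tz_offset
 * -700, matching the sign handling above.
 */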
static int read_ref_without_reload(struct reftable_stack *stack,
				   const char *refname,
				   struct object_id *oid,
				   struct strbuf *referent,
				   unsigned int *type)
{
	struct reftable_ref_record ref = {0};
	int ret;

	ret = reftable_stack_read_ref(stack, refname, &ref);
	if (ret)
		goto done;

	if (ref.value_type == REFTABLE_REF_SYMREF) {
		strbuf_reset(referent);
		strbuf_addstr(referent, ref.value.symref);
		*type |= REF_ISSYMREF;
	} else if (reftable_ref_record_val1(&ref)) {
		oidread(oid, reftable_ref_record_val1(&ref));
	} else {
		/* We got a tombstone, which should not happen. */
		BUG("unhandled reference value type %d", ref.value_type);
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_ref_record_release(&ref);
	return ret;
}
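
/*
 * Like `reftable_stack_read_ref()`, the above returns 0 when the
 * reference was found, a positive value when it does not exist, and a
 * negative reftable error code on failure. Callers below rely on this
 * three-way contract.
 */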
static struct ref_store *reftable_be_init(struct repository *repo,
					  const char *gitdir,
					  unsigned int store_flags)
{
	struct reftable_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct strbuf path = STRBUF_INIT;
	int is_worktree;
	mode_t mask;

	mask = umask(0);
	umask(mask);

	base_ref_store_init(&refs->base, repo, gitdir, &refs_be_reftable);
	strmap_init(&refs->worktree_stacks);
	refs->store_flags = store_flags;
	refs->write_options.block_size = 4096;
	refs->write_options.hash_id = repo->hash_algo->format_id;
	refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);

	/*
	 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
	 * This stack contains both the shared and the main worktree refs.
	 *
	 * Note that we don't try to resolve the path in case we have a
	 * worktree because `get_common_dir_noenv()` already does it for us.
	 */
	is_worktree = get_common_dir_noenv(&path, gitdir);
	if (!is_worktree) {
		strbuf_reset(&path);
		strbuf_realpath(&path, gitdir, 0);
	}
	strbuf_addstr(&path, "/reftable");
	refs->err = reftable_new_stack(&refs->main_stack, path.buf,
				       refs->write_options);
	if (refs->err)
		goto done;

	/*
	 * If we're in a worktree we also need to set up the worktree reftable
	 * stack that is contained in the per-worktree GIT_DIR.
	 *
	 * Ideally, we would also add the stack to our worktree stack map. But
	 * we have no way to figure out the worktree name here and thus can't
	 * do it efficiently.
	 */
	if (is_worktree) {
		strbuf_reset(&path);
		strbuf_addf(&path, "%s/reftable", gitdir);

		refs->err = reftable_new_stack(&refs->worktree_stack, path.buf,
					       refs->write_options);
		if (refs->err)
			goto done;
	}

	chdir_notify_reparent("reftables-backend $GIT_DIR", &refs->base.gitdir);

done:
	assert(refs->err != REFTABLE_API_ERROR);
	strbuf_release(&path);
	return &refs->base;
}
static int reftable_be_init_db(struct ref_store *ref_store,
			       int flags UNUSED,
			       struct strbuf *err UNUSED)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "init_db");
	struct strbuf sb = STRBUF_INIT;

	strbuf_addf(&sb, "%s/reftable", refs->base.gitdir);
	safe_create_dir(sb.buf, 1);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/HEAD", refs->base.gitdir);
	write_file(sb.buf, "ref: refs/heads/.invalid");
	adjust_shared_perm(sb.buf);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs", refs->base.gitdir);
	safe_create_dir(sb.buf, 1);
	strbuf_reset(&sb);

	strbuf_addf(&sb, "%s/refs/heads", refs->base.gitdir);
	write_file(sb.buf, "this repository uses the reftable format");
	adjust_shared_perm(sb.buf);

	strbuf_release(&sb);
	return 0;
}
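
/*
 * A freshly initialized repository thus contains:
 *
 *   $GIT_DIR/reftable/   - the (initially empty) stack of tables
 *   $GIT_DIR/HEAD        - a stub pointing to "refs/heads/.invalid" so
 *                          that tools unaware of reftable refuse to
 *                          operate on the repository
 *   $GIT_DIR/refs/heads  - a regular file marking the ref format
 */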
struct reftable_ref_iterator {
	struct ref_iterator base;
	struct reftable_ref_store *refs;
	struct reftable_iterator iter;
	struct reftable_ref_record ref;
	struct object_id oid;

	const char *prefix;
	int err;
	unsigned int flags;
};
static int reftable_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;
	struct reftable_ref_store *refs = iter->refs;

	while (!iter->err) {
		int flags = 0;

		iter->err = reftable_iterator_next_ref(&iter->iter, &iter->ref);
		if (iter->err)
			break;

		/*
		 * The files backend only lists references contained in "refs/" unless
		 * the root refs are to be included. We emulate the same behaviour here.
		 */
		if (!starts_with(iter->ref.refname, "refs/") &&
		    !(iter->flags & DO_FOR_EACH_INCLUDE_ROOT_REFS &&
		      (is_pseudoref(&iter->refs->base, iter->ref.refname) ||
		       is_headref(&iter->refs->base, iter->ref.refname)))) {
			continue;
		}

		if (iter->prefix &&
		    strncmp(iter->prefix, iter->ref.refname, strlen(iter->prefix))) {
			iter->err = 1;
			break;
		}

		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    parse_worktree_ref(iter->ref.refname, NULL, NULL, NULL) !=
			    REF_WORKTREE_CURRENT)
			continue;

		switch (iter->ref.value_type) {
		case REFTABLE_REF_VAL1:
			oidread(&iter->oid, iter->ref.value.val1);
			break;
		case REFTABLE_REF_VAL2:
			oidread(&iter->oid, iter->ref.value.val2.value);
			break;
		case REFTABLE_REF_SYMREF:
			if (!refs_resolve_ref_unsafe(&iter->refs->base, iter->ref.refname,
						     RESOLVE_REF_READING, &iter->oid, &flags))
				oidclr(&iter->oid);
			break;
		default:
			BUG("unhandled reference value type %d", iter->ref.value_type);
		}

		if (is_null_oid(&iter->oid))
			flags |= REF_ISBROKEN;

		if (check_refname_format(iter->ref.refname, REFNAME_ALLOW_ONELEVEL)) {
			if (!refname_is_safe(iter->ref.refname))
				die(_("refname is dangerous: %s"), iter->ref.refname);
			oidclr(&iter->oid);
			flags |= REF_BAD_NAME | REF_ISBROKEN;
		}

		if (iter->flags & DO_FOR_EACH_OMIT_DANGLING_SYMREFS &&
		    flags & REF_ISSYMREF &&
		    flags & REF_ISBROKEN)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->ref.refname, refs->base.repo,
					    &iter->oid, flags))
			continue;

		iter->base.refname = iter->ref.refname;
		iter->base.oid = &iter->oid;
		iter->base.flags = flags;

		break;
	}

	if (iter->err > 0) {
		if (ref_iterator_abort(ref_iterator) != ITER_DONE)
			return ITER_ERROR;
		return ITER_DONE;
	}

	if (iter->err < 0) {
		ref_iterator_abort(ref_iterator);
		return ITER_ERROR;
	}

	return ITER_OK;
}
static int reftable_ref_iterator_peel(struct ref_iterator *ref_iterator,
				      struct object_id *peeled)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;

	if (iter->ref.value_type == REFTABLE_REF_VAL2) {
		oidread(peeled, iter->ref.value.val2.target_value);
		return 0;
	}

	return -1;
}
static int reftable_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct reftable_ref_iterator *iter =
		(struct reftable_ref_iterator *)ref_iterator;
	reftable_ref_record_release(&iter->ref);
	reftable_iterator_destroy(&iter->iter);
	free(iter);
	return ITER_DONE;
}

static struct ref_iterator_vtable reftable_ref_iterator_vtable = {
	.advance = reftable_ref_iterator_advance,
	.peel = reftable_ref_iterator_peel,
	.abort = reftable_ref_iterator_abort
};
static struct reftable_ref_iterator *ref_iterator_for_stack(struct reftable_ref_store *refs,
							    struct reftable_stack *stack,
							    const char *prefix,
							    int flags)
{
	struct reftable_merged_table *merged_table;
	struct reftable_ref_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_ref_iterator_vtable);
	iter->prefix = prefix;
	iter->base.oid = &iter->oid;
	iter->flags = flags;
	iter->refs = refs;

	ret = refs->err;
	if (ret)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	merged_table = reftable_stack_merged_table(stack);

	ret = reftable_merged_table_seek_ref(merged_table, &iter->iter, prefix);
	if (ret)
		goto done;

done:
	iter->err = ret;
	return iter;
}
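
/*
 * Note that seeking the merged table merely positions the iterator at
 * the first record greater than or equal to `prefix`; it is the
 * strncmp() check in `reftable_ref_iterator_advance()` that terminates
 * iteration once records stop matching the prefix.
 */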
static struct ref_iterator *reftable_be_iterator_begin(struct ref_store *ref_store,
						       const char *prefix,
						       const char **exclude_patterns,
						       unsigned int flags)
{
	struct reftable_ref_iterator *main_iter, *worktree_iter;
	struct reftable_ref_store *refs;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = reftable_be_downcast(ref_store, required_flags, "ref_iterator_begin");

	main_iter = ref_iterator_for_stack(refs, refs->main_stack, prefix, flags);

	/*
	 * The worktree stack is only set when we're in an actual worktree
	 * right now. If we aren't, then we return the common reftable
	 * iterator, only.
	 */
	if (!refs->worktree_stack)
		return &main_iter->base;

	/*
	 * Otherwise we merge both the common and the per-worktree refs into a
	 * single iterator.
	 */
	worktree_iter = ref_iterator_for_stack(refs, refs->worktree_stack, prefix, flags);
	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}
static int reftable_be_read_raw_ref(struct ref_store *ref_store,
				    const char *refname,
				    struct object_id *oid,
				    struct strbuf *referent,
				    unsigned int *type,
				    int *failure_errno)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	int ret;

	if (refs->err < 0)
		return refs->err;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;

	ret = read_ref_without_reload(stack, refname, oid, referent, type);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		*failure_errno = ENOENT;
		return -1;
	}

	return 0;
}
static int reftable_be_read_symbolic_ref(struct ref_store *ref_store,
					 const char *refname,
					 struct strbuf *referent)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "read_symbolic_ref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_ref_record ref = {0};
	int ret;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;

	ret = reftable_stack_read_ref(stack, refname, &ref);
	if (ret == 0 && ref.value_type == REFTABLE_REF_SYMREF)
		strbuf_addstr(referent, ref.value.symref);
	else
		ret = -1;

	reftable_ref_record_release(&ref);
	return ret;
}
/*
 * Return the refname under which update was originally requested.
 */
static const char *original_update_refname(struct ref_update *update)
{
	while (update->parent_update)
		update = update->parent_update;
	return update->refname;
}
struct reftable_transaction_update {
	struct ref_update *update;
	struct object_id current_oid;
};

struct write_transaction_table_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct reftable_addition *addition;
	struct reftable_transaction_update *updates;
	size_t updates_nr;
	size_t updates_alloc;
	size_t updates_expected;
};

struct reftable_transaction_data {
	struct write_transaction_table_arg *args;
	size_t args_nr, args_alloc;
};
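
/*
 * A single transaction thus fans out into one
 * `write_transaction_table_arg` per reftable stack that it touches,
 * e.g. when it updates both a shared ref and a per-worktree ref.
 * `prepare_transaction_update()` below maintains this mapping.
 */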
static void free_transaction_data(struct reftable_transaction_data *tx_data)
{
	if (!tx_data)
		return;
	for (size_t i = 0; i < tx_data->args_nr; i++) {
		reftable_addition_destroy(tx_data->args[i].addition);
		free(tx_data->args[i].updates);
	}
	free(tx_data->args);
	free(tx_data);
}
/*
 * Prepare transaction update for the given reference update. This will cause
 * us to lock the corresponding reftable stack for concurrent modification.
 */
static int prepare_transaction_update(struct write_transaction_table_arg **out,
				      struct reftable_ref_store *refs,
				      struct reftable_transaction_data *tx_data,
				      struct ref_update *update,
				      struct strbuf *err)
{
	struct reftable_stack *stack = stack_for(refs, update->refname, NULL);
	struct write_transaction_table_arg *arg = NULL;
	size_t i;
	int ret;

	/*
	 * Search for a preexisting stack update. If there is one then we add
	 * the update to it, otherwise we set up a new stack update.
	 */
	for (i = 0; !arg && i < tx_data->args_nr; i++)
		if (tx_data->args[i].stack == stack)
			arg = &tx_data->args[i];

	if (!arg) {
		struct reftable_addition *addition;

		ret = reftable_stack_reload(stack);
		if (ret)
			return ret;

		ret = reftable_stack_new_addition(&addition, stack);
		if (ret) {
			if (ret == REFTABLE_LOCK_ERROR)
				strbuf_addstr(err, "cannot lock references");
			return ret;
		}

		ALLOC_GROW(tx_data->args, tx_data->args_nr + 1,
			   tx_data->args_alloc);
		arg = &tx_data->args[tx_data->args_nr++];
		arg->refs = refs;
		arg->stack = stack;
		arg->addition = addition;
		arg->updates = NULL;
		arg->updates_nr = 0;
		arg->updates_alloc = 0;
		arg->updates_expected = 0;
	}

	arg->updates_expected++;

	if (out)
		*out = arg;

	return 0;
}
/*
 * Queue a reference update for the correct stack. We potentially need to
 * handle multiple stack updates in a single transaction when it spans across
 * multiple worktrees.
 */
static int queue_transaction_update(struct reftable_ref_store *refs,
				    struct reftable_transaction_data *tx_data,
				    struct ref_update *update,
				    struct object_id *current_oid,
				    struct strbuf *err)
{
	struct write_transaction_table_arg *arg = NULL;
	int ret;

	if (update->backend_data)
		BUG("reference update queued more than once");

	ret = prepare_transaction_update(&arg, refs, tx_data, update, err);
	if (ret < 0)
		return ret;

	ALLOC_GROW(arg->updates, arg->updates_nr + 1,
		   arg->updates_alloc);
	arg->updates[arg->updates_nr].update = update;
	oidcpy(&arg->updates[arg->updates_nr].current_oid, current_oid);
	update->backend_data = &arg->updates[arg->updates_nr++];

	return 0;
}
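
/*
 * Note that `update->backend_data` points into `arg->updates`. This is
 * safe because the array is preallocated with `updates_expected`
 * entries in `reftable_be_transaction_prepare()` and hence never gets
 * reallocated while updates are being queued.
 */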
static int reftable_be_transaction_prepare(struct ref_store *ref_store,
					   struct ref_transaction *transaction,
					   struct strbuf *err)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE|REF_STORE_MAIN, "ref_transaction_prepare");
	struct strbuf referent = STRBUF_INIT, head_referent = STRBUF_INIT;
	struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
	struct reftable_transaction_data *tx_data = NULL;
	struct object_id head_oid;
	unsigned int head_type = 0;
	size_t i;
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	tx_data = xcalloc(1, sizeof(*tx_data));

	/*
	 * Preprocess all updates. For one we check that there are no duplicate
	 * reference updates in this transaction. Second, we lock all stacks
	 * that will be modified during the transaction.
	 */
	for (i = 0; i < transaction->nr; i++) {
		ret = prepare_transaction_update(NULL, refs, tx_data,
						 transaction->updates[i], err);
		if (ret)
			goto done;

		string_list_append(&affected_refnames,
				   transaction->updates[i]->refname);
	}

	/*
	 * Now that we have counted updates per stack we can preallocate their
	 * arrays. This avoids having to reallocate many times.
	 */
	for (i = 0; i < tx_data->args_nr; i++) {
		CALLOC_ARRAY(tx_data->args[i].updates, tx_data->args[i].updates_expected);
		tx_data->args[i].updates_alloc = tx_data->args[i].updates_expected;
	}

	/*
	 * Fail if a refname appears more than once in the transaction.
	 * This code is taken from the files backend and is a good candidate to
	 * be moved into the generic layer.
	 */
	string_list_sort(&affected_refnames);
	if (ref_update_reject_duplicates(&affected_refnames, err)) {
		ret = TRANSACTION_GENERIC_ERROR;
		goto done;
	}

	ret = read_ref_without_reload(stack_for(refs, "HEAD", NULL), "HEAD", &head_oid,
				      &head_referent, &head_type);
	if (ret < 0)
		goto done;
	ret = 0;

	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *u = transaction->updates[i];
		struct object_id current_oid = {0};
		struct reftable_stack *stack;
		const char *rewritten_ref;

		stack = stack_for(refs, u->refname, &rewritten_ref);

		/* Verify that the new object ID is valid. */
		if ((u->flags & REF_HAVE_NEW) && !is_null_oid(&u->new_oid) &&
		    !(u->flags & REF_SKIP_OID_VERIFICATION) &&
		    !(u->flags & REF_LOG_ONLY)) {
			struct object *o = parse_object(refs->base.repo, &u->new_oid);
			if (!o) {
				strbuf_addf(err,
					    _("trying to write ref '%s' with nonexistent object %s"),
					    u->refname, oid_to_hex(&u->new_oid));
				ret = -1;
				goto done;
			}

			if (o->type != OBJ_COMMIT && is_branch(u->refname)) {
				strbuf_addf(err, _("trying to write non-commit object %s to branch '%s'"),
					    oid_to_hex(&u->new_oid), u->refname);
				ret = -1;
				goto done;
			}
		}

		/*
		 * When we update the reference that HEAD points to we enqueue
		 * a second log-only update for HEAD so that its reflog is
		 * updated accordingly.
		 */
		if (head_type == REF_ISSYMREF &&
		    !(u->flags & REF_LOG_ONLY) &&
		    !(u->flags & REF_UPDATE_VIA_HEAD) &&
		    !strcmp(rewritten_ref, head_referent.buf)) {
			struct ref_update *new_update;

			/*
			 * First make sure that HEAD is not already in the
			 * transaction. This check is O(lg N) in the transaction
			 * size, but it happens at most once per transaction.
			 */
			if (string_list_has_string(&affected_refnames, "HEAD")) {
				/* An entry already existed */
				strbuf_addf(err,
					    _("multiple updates for 'HEAD' (including one "
					      "via its referent '%s') are not allowed"),
					    u->refname);
				ret = TRANSACTION_NAME_CONFLICT;
				goto done;
			}

			new_update = ref_transaction_add_update(
					transaction, "HEAD",
					u->flags | REF_LOG_ONLY | REF_NO_DEREF,
					&u->new_oid, &u->old_oid, u->msg);
			string_list_insert(&affected_refnames, new_update->refname);
		}

		ret = read_ref_without_reload(stack, rewritten_ref,
					      &current_oid, &referent, &u->type);
		if (ret < 0)
			goto done;
		if (ret > 0 && (!(u->flags & REF_HAVE_OLD) || is_null_oid(&u->old_oid))) {
			/*
			 * The reference does not exist, and we either have no
			 * old object ID or expect the reference to not exist.
			 * We can thus skip below safety checks as well as the
			 * symref splitting. But we do want to verify that
			 * there is no conflicting reference here so that we
			 * can output a proper error message instead of failing
			 * at a later point.
			 */
			ret = refs_verify_refname_available(ref_store, u->refname,
							    &affected_refnames, NULL, err);
			if (ret < 0)
				goto done;

			/*
			 * There is no need to write the reference deletion
			 * when the reference in question doesn't exist.
			 */
			if (u->flags & REF_HAVE_NEW && !is_null_oid(&u->new_oid)) {
				ret = queue_transaction_update(refs, tx_data, u,
							       &current_oid, err);
				if (ret)
					goto done;
			}

			continue;
		}
		if (ret > 0) {
			/* The reference does not exist, but we expected it to. */
			strbuf_addf(err, _("cannot lock ref '%s': "
				    "unable to resolve reference '%s'"),
				    original_update_refname(u), u->refname);
			ret = -1;
			goto done;
		}

		if (u->type & REF_ISSYMREF) {
			/*
			 * The reftable stack is locked at this point already,
			 * so it is safe to call `refs_resolve_ref_unsafe()`
			 * here without causing races.
			 */
			const char *resolved = refs_resolve_ref_unsafe(&refs->base, u->refname, 0,
								       &current_oid, NULL);

			if (u->flags & REF_NO_DEREF) {
				if (u->flags & REF_HAVE_OLD && !resolved) {
					strbuf_addf(err, _("cannot lock ref '%s': "
						    "error reading reference"), u->refname);
					ret = -1;
					goto done;
				}
			} else {
				struct ref_update *new_update;
				int new_flags;

				new_flags = u->flags;
				if (!strcmp(rewritten_ref, "HEAD"))
					new_flags |= REF_UPDATE_VIA_HEAD;

				/*
				 * If we are updating a symref (eg. HEAD), we should also
				 * update the branch that the symref points to.
				 *
				 * This is generic functionality, and would be better
				 * done in refs.c, but the current implementation is
				 * intertwined with the locking in files-backend.c.
				 */
				new_update = ref_transaction_add_update(
						transaction, referent.buf, new_flags,
						&u->new_oid, &u->old_oid, u->msg);
				new_update->parent_update = u;

				/*
				 * Change the symbolic ref update to log only. Also, it
				 * doesn't need to check its old OID value, as that will be
				 * done when new_update is processed.
				 */
				u->flags |= REF_LOG_ONLY | REF_NO_DEREF;
				u->flags &= ~REF_HAVE_OLD;

				if (string_list_has_string(&affected_refnames, new_update->refname)) {
					strbuf_addf(err,
						    _("multiple updates for '%s' (including one "
						      "via symref '%s') are not allowed"),
						    referent.buf, u->refname);
					ret = TRANSACTION_NAME_CONFLICT;
					goto done;
				}
				string_list_insert(&affected_refnames, new_update->refname);
			}
		}

		/*
		 * Verify that the old object matches our expectations. Note
		 * that the error messages here do not make a lot of sense in
		 * the context of the reftable backend as we never lock
		 * individual refs. But the error messages match what the files
		 * backend returns, which keeps our tests happy.
		 */
		if (u->flags & REF_HAVE_OLD && !oideq(&current_oid, &u->old_oid)) {
			if (is_null_oid(&u->old_oid))
				strbuf_addf(err, _("cannot lock ref '%s': "
					    "reference already exists"),
					    original_update_refname(u));
			else if (is_null_oid(&current_oid))
				strbuf_addf(err, _("cannot lock ref '%s': "
					    "reference is missing but expected %s"),
					    original_update_refname(u),
					    oid_to_hex(&u->old_oid));
			else
				strbuf_addf(err, _("cannot lock ref '%s': "
					    "is at %s but expected %s"),
					    original_update_refname(u),
					    oid_to_hex(&current_oid),
					    oid_to_hex(&u->old_oid));
			ret = TRANSACTION_NAME_CONFLICT;
			goto done;
		}

		/*
		 * If all of the following conditions are true:
		 *
		 *   - We're not about to write a symref.
		 *   - We're not about to write a log-only entry.
		 *   - Old and new object ID are the same.
		 *
		 * Then we're essentially doing a no-op update that can be
		 * skipped. This is not only for the sake of efficiency, but
		 * also skips writing unneeded reflog entries.
		 */
		if ((u->type & REF_ISSYMREF) ||
		    (u->flags & REF_LOG_ONLY) ||
		    (u->flags & REF_HAVE_NEW && !oideq(&current_oid, &u->new_oid))) {
			ret = queue_transaction_update(refs, tx_data, u,
						       &current_oid, err);
			if (ret)
				goto done;
		}
	}

	transaction->backend_data = tx_data;
	transaction->state = REF_TRANSACTION_PREPARED;

done:
	assert(ret != REFTABLE_API_ERROR);
	if (ret < 0) {
		free_transaction_data(tx_data);
		transaction->state = REF_TRANSACTION_CLOSED;
		if (!err->len)
			strbuf_addf(err, _("reftable: transaction prepare: %s"),
				    reftable_error_str(ret));
	}
	string_list_clear(&affected_refnames, 0);
	strbuf_release(&referent);
	strbuf_release(&head_referent);

	return ret;
}
static int reftable_be_transaction_abort(struct ref_store *ref_store,
					 struct ref_transaction *transaction,
					 struct strbuf *err UNUSED)
{
	struct reftable_transaction_data *tx_data = transaction->backend_data;
	free_transaction_data(tx_data);
	transaction->state = REF_TRANSACTION_CLOSED;
	return 0;
}
static int transaction_update_cmp(const void *a, const void *b)
{
	return strcmp(((struct reftable_transaction_update *)a)->update->refname,
		      ((struct reftable_transaction_update *)b)->update->refname);
}
static int write_transaction_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_transaction_table_arg *arg = cb_data;
	struct reftable_merged_table *mt =
		reftable_stack_merged_table(arg->stack);
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record *logs = NULL;
	size_t logs_nr = 0, logs_alloc = 0, i;
	int ret = 0;

	QSORT(arg->updates, arg->updates_nr, transaction_update_cmp);

	reftable_writer_set_limits(writer, ts, ts);

	for (i = 0; i < arg->updates_nr; i++) {
		struct reftable_transaction_update *tx_update = &arg->updates[i];
		struct ref_update *u = tx_update->update;

		/*
		 * Write a reflog entry when updating a ref to point to
		 * something new in either of the following cases:
		 *
		 * - The reference is about to be deleted. We always want to
		 *   delete the reflog in that case.
		 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
		 *   the reflog entry.
		 * - `core.logAllRefUpdates` tells us to create the reflog for
		 *   the given ref.
		 */
		if (u->flags & REF_HAVE_NEW && !(u->type & REF_ISSYMREF) && is_null_oid(&u->new_oid)) {
			struct reftable_log_record log = {0};
			struct reftable_iterator it = {0};

			/*
			 * When deleting refs we also delete all reflog entries
			 * with them. While it is not strictly required to
			 * delete reflogs together with their refs, this
			 * matches the behaviour of the files backend.
			 *
			 * Unfortunately, we have no better way than to delete
			 * all reflog entries one by one.
			 */
			ret = reftable_merged_table_seek_log(mt, &it, u->refname);
			while (ret == 0) {
				struct reftable_log_record *tombstone;

				ret = reftable_iterator_next_log(&it, &log);
				if (ret < 0)
					break;
				if (ret > 0 || strcmp(log.refname, u->refname)) {
					ret = 0;
					break;
				}

				ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
				tombstone = &logs[logs_nr++];
				tombstone->refname = xstrdup(u->refname);
				tombstone->value_type = REFTABLE_LOG_DELETION;
				tombstone->update_index = log.update_index;
			}

			reftable_log_record_release(&log);
			reftable_iterator_destroy(&it);

			if (ret)
				goto done;
		} else if (u->flags & REF_HAVE_NEW &&
			   (u->flags & REF_FORCE_CREATE_REFLOG ||
			    should_write_log(&arg->refs->base, u->refname))) {
			struct reftable_log_record *log;

			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			log = &logs[logs_nr++];
			memset(log, 0, sizeof(*log));

			fill_reftable_log_record(log);
			log->update_index = ts;
			log->refname = xstrdup(u->refname);
			log->value.update.new_hash = u->new_oid.hash;
			log->value.update.old_hash = tx_update->current_oid.hash;
			log->value.update.message =
				xstrndup(u->msg, arg->refs->write_options.block_size / 2);
		}

		if (u->flags & REF_LOG_ONLY)
			continue;

		if (u->flags & REF_HAVE_NEW && is_null_oid(&u->new_oid)) {
			struct reftable_ref_record ref = {
				.refname = (char *)u->refname,
				.update_index = ts,
				.value_type = REFTABLE_REF_DELETION,
			};

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		} else if (u->flags & REF_HAVE_NEW) {
			struct reftable_ref_record ref = {0};
			struct object_id peeled;
			int peel_error;

			ref.refname = (char *)u->refname;
			ref.update_index = ts;

			peel_error = peel_object(&u->new_oid, &peeled);
			if (!peel_error) {
				ref.value_type = REFTABLE_REF_VAL2;
				memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
				memcpy(ref.value.val2.value, u->new_oid.hash, GIT_MAX_RAWSZ);
			} else if (!is_null_oid(&u->new_oid)) {
				ref.value_type = REFTABLE_REF_VAL1;
				memcpy(ref.value.val1, u->new_oid.hash, GIT_MAX_RAWSZ);
			}

			ret = reftable_writer_add_ref(writer, &ref);
			if (ret < 0)
				goto done;
		}
	}

	/*
	 * Logs are written at the end so that we do not have intermixed ref
	 * and log blocks.
	 */
	if (logs) {
		ret = reftable_writer_add_logs(writer, logs, logs_nr);
		if (ret < 0)
			goto done;
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	for (i = 0; i < logs_nr; i++)
		clear_reftable_log_record(&logs[i]);
	free(logs);
	return ret;
}
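
/*
 * Note that all ref records and newly created log records above share
 * the single update index `ts`: one transaction becomes one atomic
 * addition to the stack. Only the reflog tombstones reuse the update
 * index of the entry they are deleting.
 */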
static int reftable_be_transaction_finish(struct ref_store *ref_store,
					  struct ref_transaction *transaction,
					  struct strbuf *err)
{
	struct reftable_transaction_data *tx_data = transaction->backend_data;
	int ret = 0;

	for (size_t i = 0; i < tx_data->args_nr; i++) {
		ret = reftable_addition_add(tx_data->args[i].addition,
					    write_transaction_table, &tx_data->args[i]);
		if (ret < 0)
			goto done;

		ret = reftable_addition_commit(tx_data->args[i].addition);
		if (ret < 0)
			goto done;
	}

done:
	assert(ret != REFTABLE_API_ERROR);
	free_transaction_data(tx_data);
	transaction->state = REF_TRANSACTION_CLOSED;

	if (ret) {
		strbuf_addf(err, _("reftable: transaction failure: %s"),
			    reftable_error_str(ret));
		return -1;
	}
	return ret;
}
static int reftable_be_initial_transaction_commit(struct ref_store *ref_store UNUSED,
						  struct ref_transaction *transaction,
						  struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
static int reftable_be_pack_refs(struct ref_store *ref_store,
				 struct pack_refs_opts *opts)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB, "pack_refs");
	struct reftable_stack *stack;
	int ret;

	if (refs->err)
		return refs->err;

	stack = refs->worktree_stack;
	if (!stack)
		stack = refs->main_stack;

	ret = reftable_stack_compact_all(stack, NULL);
	if (ret)
		goto out;
	ret = reftable_stack_clean(stack);
	if (ret)
		goto out;

out:
	return ret;
}
struct write_create_symref_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	const char *refname;
	const char *target;
	const char *logmsg;
};

static int write_create_symref_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_create_symref_arg *create = cb_data;
	uint64_t ts = reftable_stack_next_update_index(create->stack);
	struct reftable_ref_record ref = {
		.refname = (char *)create->refname,
		.value_type = REFTABLE_REF_SYMREF,
		.value.symref = (char *)create->target,
		.update_index = ts,
	};
	struct reftable_log_record log = {0};
	struct object_id new_oid;
	struct object_id old_oid;
	int ret;

	reftable_writer_set_limits(writer, ts, ts);

	ret = reftable_writer_add_ref(writer, &ref);
	if (ret)
		return ret;

	/*
	 * Note that it is important to try and resolve the reference before we
	 * write the log entry. This is because `should_write_log()` will munge
	 * `core.logAllRefUpdates`, which is undesirable when we create a new
	 * repository because it would be written into the config. As HEAD will
	 * not resolve for new repositories this ordering will ensure that this
	 * never happens.
	 */
	if (!create->logmsg ||
	    !refs_resolve_ref_unsafe(&create->refs->base, create->target,
				     RESOLVE_REF_READING, &new_oid, NULL) ||
	    !should_write_log(&create->refs->base, create->refname))
		return 0;

	fill_reftable_log_record(&log);
	log.refname = xstrdup(create->refname);
	log.update_index = ts;
	log.value.update.message = xstrndup(create->logmsg,
					    create->refs->write_options.block_size / 2);
	log.value.update.new_hash = new_oid.hash;
	if (refs_resolve_ref_unsafe(&create->refs->base, create->refname,
				    RESOLVE_REF_READING, &old_oid, NULL))
		log.value.update.old_hash = old_oid.hash;

	ret = reftable_writer_add_log(writer, &log);
	clear_reftable_log_record(&log);
	return ret;
}
static int reftable_be_create_symref(struct ref_store *ref_store,
				     const char *refname,
				     const char *target,
				     const char *logmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_symref");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_create_symref_arg arg = {
		.refs = refs,
		.stack = stack,
		.refname = refname,
		.target = target,
		.logmsg = logmsg,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	ret = reftable_stack_add(stack, &write_create_symref_table, &arg);

done:
	assert(ret != REFTABLE_API_ERROR);
	if (ret)
		error("unable to write symref for %s: %s", refname,
		      reftable_error_str(ret));
	return ret;
}
struct write_copy_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	const char *oldname;
	const char *newname;
	const char *logmsg;
	int delete_old;
};
static int write_copy_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_copy_arg *arg = cb_data;
	uint64_t deletion_ts, creation_ts;
	struct reftable_merged_table *mt = reftable_stack_merged_table(arg->stack);
	struct reftable_ref_record old_ref = {0}, refs[2] = {0};
	struct reftable_log_record old_log = {0}, *logs = NULL;
	struct reftable_iterator it = {0};
	struct string_list skip = STRING_LIST_INIT_NODUP;
	struct strbuf errbuf = STRBUF_INIT;
	size_t logs_nr = 0, logs_alloc = 0, i;
	int ret;

	if (reftable_stack_read_ref(arg->stack, arg->oldname, &old_ref)) {
		ret = error(_("refname %s not found"), arg->oldname);
		goto done;
	}
	if (old_ref.value_type == REFTABLE_REF_SYMREF) {
		ret = error(_("refname %s is a symbolic ref, copying it is not supported"),
			    arg->oldname);
		goto done;
	}

	/*
	 * There's nothing to do in case the old and new name are the same, so
	 * we exit early in that case.
	 */
	if (!strcmp(arg->oldname, arg->newname)) {
		ret = 0;
		goto done;
	}

	/*
	 * Verify that the new refname is available.
	 */
	string_list_insert(&skip, arg->oldname);
	ret = refs_verify_refname_available(&arg->refs->base, arg->newname,
					    NULL, &skip, &errbuf);
	if (ret < 0) {
		error("%s", errbuf.buf);
		goto done;
	}

	/*
	 * When deleting the old reference we have to use two update indices:
	 * once to delete the old ref and its reflog, and once to create the
	 * new ref and its reflog. They need to be staged with two separate
	 * indices because the new reflog needs to encode both the deletion of
	 * the old branch and the creation of the new branch, and we cannot do
	 * two changes to a reflog in a single update.
	 */
	deletion_ts = creation_ts = reftable_stack_next_update_index(arg->stack);
	if (arg->delete_old)
		creation_ts++;
	reftable_writer_set_limits(writer, deletion_ts, creation_ts);

	/*
	 * Add the new reference. If this is a rename then we also delete the
	 * old reference.
	 */
	refs[0] = old_ref;
	refs[0].refname = (char *)arg->newname;
	refs[0].update_index = creation_ts;
	if (arg->delete_old) {
		refs[1].refname = (char *)arg->oldname;
		refs[1].value_type = REFTABLE_REF_DELETION;
		refs[1].update_index = deletion_ts;
	}
	ret = reftable_writer_add_refs(writer, refs, arg->delete_old ? 2 : 1);
	if (ret < 0)
		goto done;

	/*
	 * When deleting the old branch we need to create a reflog entry on the
	 * new branch name that indicates that the old branch has been deleted
	 * and then recreated. This is a tad weird, but matches what the files
	 * backend does.
	 */
	if (arg->delete_old) {
		struct strbuf head_referent = STRBUF_INIT;
		struct object_id head_oid;
		int append_head_reflog;
		unsigned head_type = 0;

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
		fill_reftable_log_record(&logs[logs_nr]);
		logs[logs_nr].refname = (char *)arg->newname;
		logs[logs_nr].update_index = deletion_ts;
		logs[logs_nr].value.update.message =
			xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
		logs[logs_nr].value.update.old_hash = old_ref.value.val1;
		logs_nr++;

		ret = read_ref_without_reload(arg->stack, "HEAD", &head_oid, &head_referent, &head_type);
		if (ret < 0)
			goto done;
		append_head_reflog = (head_type & REF_ISSYMREF) && !strcmp(head_referent.buf, arg->oldname);
		strbuf_release(&head_referent);

		/*
		 * The files backend uses `refs_delete_ref()` to delete the old
		 * branch name, which will append a reflog entry for HEAD in
		 * case it points to the old branch.
		 */
		if (append_head_reflog) {
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			logs[logs_nr] = logs[logs_nr - 1];
			logs[logs_nr].refname = "HEAD";
			logs_nr++;
		}
	}

	/*
	 * Create the reflog entry for the newly created branch.
	 */
	ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
	memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
	fill_reftable_log_record(&logs[logs_nr]);
	logs[logs_nr].refname = (char *)arg->newname;
	logs[logs_nr].update_index = creation_ts;
	logs[logs_nr].value.update.message =
		xstrndup(arg->logmsg, arg->refs->write_options.block_size / 2);
	logs[logs_nr].value.update.new_hash = old_ref.value.val1;
	logs_nr++;

	/*
	 * In addition to writing the reflog entry for the new branch, we also
	 * copy over all log entries from the old reflog. Last but not least,
	 * when renaming we also have to delete all the old reflog entries.
	 */
	ret = reftable_merged_table_seek_log(mt, &it, arg->oldname);
	if (ret < 0)
		goto done;

	while (1) {
		ret = reftable_iterator_next_log(&it, &old_log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(old_log.refname, arg->oldname)) {
			ret = 0;
			break;
		}

		free(old_log.refname);

		/*
		 * Copy over the old reflog entry with the new refname.
		 */
		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr] = old_log;
		logs[logs_nr].refname = (char *)arg->newname;
		logs_nr++;

		/*
		 * Delete the old reflog entry in case we are renaming.
		 */
		if (arg->delete_old) {
			ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
			memset(&logs[logs_nr], 0, sizeof(logs[logs_nr]));
			logs[logs_nr].refname = (char *)arg->oldname;
			logs[logs_nr].value_type = REFTABLE_LOG_DELETION;
			logs[logs_nr].update_index = old_log.update_index;
			logs_nr++;
		}

		/*
		 * Transfer ownership of the log record we're iterating over to
		 * the array of log records. Otherwise, the pointers would get
		 * free'd or reallocated by the iterator.
		 */
		memset(&old_log, 0, sizeof(old_log));
	}

	ret = reftable_writer_add_logs(writer, logs, logs_nr);
	if (ret < 0)
		goto done;

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_iterator_destroy(&it);
	string_list_clear(&skip, 0);
	strbuf_release(&errbuf);
	for (i = 0; i < logs_nr; i++) {
		if (!strcmp(logs[i].refname, "HEAD"))
			continue;
		if (logs[i].value.update.old_hash == old_ref.value.val1)
			logs[i].value.update.old_hash = NULL;
		if (logs[i].value.update.new_hash == old_ref.value.val1)
			logs[i].value.update.new_hash = NULL;
		logs[i].refname = NULL;
		reftable_log_record_release(&logs[i]);
	}
	free(logs);
	reftable_ref_record_release(&old_ref);
	reftable_log_record_release(&old_log);
	return ret;
}
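
/*
 * For a rename with `delete_old` set and next update index N, the
 * resulting table thus contains: the deletion of the old ref at index
 * N, the new ref at index N + 1, reflog entries on the new name at N
 * (old branch deleted) and N + 1 (new branch created), plus copies of
 * the old reflog entries and tombstones deleting them.
 */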
static int reftable_be_rename_ref(struct ref_store *ref_store,
				  const char *oldrefname,
				  const char *newrefname,
				  const char *logmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
	struct reftable_stack *stack = stack_for(refs, newrefname, &newrefname);
	struct write_copy_arg arg = {
		.refs = refs,
		.stack = stack,
		.oldname = oldrefname,
		.newname = newrefname,
		.logmsg = logmsg,
		.delete_old = 1,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;
	ret = reftable_stack_add(stack, &write_copy_table, &arg);

done:
	assert(ret != REFTABLE_API_ERROR);
	return ret;
}
*ref_store
,
1565 const char *oldrefname
,
1566 const char *newrefname
,
1569 struct reftable_ref_store
*refs
=
1570 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "copy_ref");
1571 struct reftable_stack
*stack
= stack_for(refs
, newrefname
, &newrefname
);
1572 struct write_copy_arg arg
= {
1575 .oldname
= oldrefname
,
1576 .newname
= newrefname
,
1585 ret
= reftable_stack_reload(stack
);
1588 ret
= reftable_stack_add(stack
, &write_copy_table
, &arg
);
1591 assert(ret
!= REFTABLE_API_ERROR
);
struct reftable_reflog_iterator {
	struct ref_iterator base;
	struct reftable_ref_store *refs;
	struct reftable_iterator iter;
	struct reftable_log_record log;
	char *last_name;
	int err;
};
static int reftable_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct reftable_reflog_iterator *iter =
		(struct reftable_reflog_iterator *)ref_iterator;

	while (!iter->err) {
		iter->err = reftable_iterator_next_log(&iter->iter, &iter->log);
		if (iter->err)
			break;

		/*
		 * We want the refnames that we have reflogs for, so we skip if
		 * we've already produced this name. This could be faster by
		 * seeking directly to reflog@update_index==0.
		 */
		if (iter->last_name && !strcmp(iter->log.refname, iter->last_name))
			continue;

		if (check_refname_format(iter->log.refname,
					 REFNAME_ALLOW_ONELEVEL))
			continue;

		free(iter->last_name);
		iter->last_name = xstrdup(iter->log.refname);
		iter->base.refname = iter->log.refname;

		break;
	}

	if (iter->err > 0) {
		if (ref_iterator_abort(ref_iterator) != ITER_DONE)
			return ITER_ERROR;
		return ITER_DONE;
	}

	if (iter->err < 0) {
		ref_iterator_abort(ref_iterator);
		return ITER_ERROR;
	}

	return ITER_OK;
}
*ref_iterator
,
1648 struct object_id
*peeled
)
1650 BUG("reftable reflog iterator cannot be peeled");
static int reftable_reflog_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct reftable_reflog_iterator *iter =
		(struct reftable_reflog_iterator *)ref_iterator;
	reftable_log_record_release(&iter->log);
	reftable_iterator_destroy(&iter->iter);
	free(iter->last_name);
	free(iter);
	return ITER_DONE;
}

static struct ref_iterator_vtable reftable_reflog_iterator_vtable = {
	.advance = reftable_reflog_iterator_advance,
	.peel = reftable_reflog_iterator_peel,
	.abort = reftable_reflog_iterator_abort
};
static struct reftable_reflog_iterator *reflog_iterator_for_stack(struct reftable_ref_store *refs,
								  struct reftable_stack *stack)
{
	struct reftable_merged_table *merged_table;
	struct reftable_reflog_iterator *iter;
	int ret;

	iter = xcalloc(1, sizeof(*iter));
	base_ref_iterator_init(&iter->base, &reftable_reflog_iterator_vtable);
	iter->refs = refs;

	ret = refs->err;
	if (ret)
		goto done;

	ret = reftable_stack_reload(refs->main_stack);
	if (ret < 0)
		goto done;

	merged_table = reftable_stack_merged_table(stack);

	ret = reftable_merged_table_seek_log(merged_table, &iter->iter, "");
	if (ret < 0)
		goto done;

done:
	iter->err = ret;
	return iter;
}
static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
	struct reftable_reflog_iterator *main_iter, *worktree_iter;

	main_iter = reflog_iterator_for_stack(refs, refs->main_stack);
	if (!refs->worktree_stack)
		return &main_iter->base;

	worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_stack);

	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}
static int yield_log_record(struct reftable_log_record *log,
			    each_reflog_ent_fn fn,
			    void *cb_data)
{
	struct object_id old_oid, new_oid;
	const char *full_committer;

	oidread(&old_oid, log->value.update.old_hash);
	oidread(&new_oid, log->value.update.new_hash);

	/*
	 * When both the old object ID and the new object ID are null
	 * then this is the reflog existence marker. The caller must
	 * not be aware of it.
	 */
	if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
		return 0;

	full_committer = fmt_ident(log->value.update.name, log->value.update.email,
				   WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
	return fn(&old_oid, &new_oid, full_committer,
		  log->value.update.time, log->value.update.tz_offset,
		  log->value.update.message, cb_data);
}
*ref_store
,
1743 const char *refname
,
1744 each_reflog_ent_fn fn
,
1747 struct reftable_ref_store
*refs
=
1748 reftable_be_downcast(ref_store
, REF_STORE_READ
, "for_each_reflog_ent_reverse");
1749 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1750 struct reftable_merged_table
*mt
= NULL
;
1751 struct reftable_log_record log
= {0};
1752 struct reftable_iterator it
= {0};
1758 mt
= reftable_stack_merged_table(stack
);
1759 ret
= reftable_merged_table_seek_log(mt
, &it
, refname
);
1761 ret
= reftable_iterator_next_log(&it
, &log
);
1764 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
1769 ret
= yield_log_record(&log
, fn
, cb_data
);
1774 reftable_log_record_release(&log
);
1775 reftable_iterator_destroy(&it
);
static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
					   const char *refname,
					   each_reflog_ent_fn fn,
					   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = NULL;
	struct reftable_log_record *logs = NULL;
	struct reftable_iterator it = {0};
	size_t logs_alloc = 0, logs_nr = 0, i;
	int ret;

	if (refs->err < 0)
		return refs->err;

	mt = reftable_stack_merged_table(stack);
	ret = reftable_merged_table_seek_log(mt, &it, refname);
	while (!ret) {
		struct reftable_log_record log = {0};

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			ret = 0;
			break;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	for (i = logs_nr; i--;) {
		ret = yield_log_record(&logs[i], fn, cb_data);
		if (ret)
			goto done;
	}

done:
	reftable_iterator_destroy(&it);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}
static int reftable_be_reflog_exists(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct reftable_merged_table *mt = reftable_stack_merged_table(stack);
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret < 0)
		goto done;

	ret = reftable_merged_table_seek_log(mt, &it, refname);
	if (ret < 0)
		goto done;

	/*
	 * Check whether we get at least one log record for the given ref name.
	 * If so, the reflog exists, otherwise it doesn't.
	 */
	ret = reftable_iterator_next_log(&it, &log);
	if (ret < 0)
		goto done;
	if (ret > 0) {
		ret = 0;
		goto done;
	}

	ret = strcmp(log.refname, refname) == 0;

done:
	reftable_iterator_destroy(&it);
	reftable_log_record_release(&log);
	if (ret < 0)
		ret = 0;
	return ret;
}
struct write_reflog_existence_arg {
	struct reftable_ref_store *refs;
	const char *refname;
	struct reftable_stack *stack;
};

static int write_reflog_existence_table(struct reftable_writer *writer,
					void *cb_data)
{
	struct write_reflog_existence_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record log = {0};
	int ret;

	ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
	if (ret <= 0)
		goto done;

	reftable_writer_set_limits(writer, ts, ts);

	/*
	 * The existence entry has both old and new object ID set to the
	 * null object ID. Our iterators are aware of this and will not present
	 * them to their callers.
	 */
	log.refname = xstrdup(arg->refname);
	log.update_index = ts;
	log.value_type = REFTABLE_LOG_UPDATE;
	ret = reftable_writer_add_log(writer, &log);

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_log_record_release(&log);
	return ret;
}
static int reftable_be_create_reflog(struct ref_store *ref_store,
				     const char *refname,
				     struct strbuf *errmsg)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_existence_arg arg = {
		.refs = refs,
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = refs->err;
	if (ret < 0)
		goto done;

	ret = reftable_stack_reload(stack);
	if (ret)
		goto done;

	ret = reftable_stack_add(stack, &write_reflog_existence_table, &arg);

done:
	return ret;
}
struct write_reflog_delete_arg {
	struct reftable_stack *stack;
	const char *refname;
};

static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_reflog_delete_arg *arg = cb_data;
	struct reftable_merged_table *mt =
		reftable_stack_merged_table(arg->stack);
	struct reftable_log_record log = {0}, tombstone = {0};
	struct reftable_iterator it = {0};
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	int ret;

	reftable_writer_set_limits(writer, ts, ts);

	/*
	 * In order to delete a table we need to delete all reflog entries one
	 * by one. This is inefficient, but the reftable format does not have a
	 * better marker right now.
	 */
	ret = reftable_merged_table_seek_log(mt, &it, arg->refname);
	while (!ret) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, arg->refname)) {
			ret = 0;
			break;
		}

		tombstone.refname = (char *)arg->refname;
		tombstone.value_type = REFTABLE_LOG_DELETION;
		tombstone.update_index = log.update_index;

		ret = reftable_writer_add_log(writer, &tombstone);
	}

	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}
static int reftable_be_delete_reflog(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
	struct reftable_stack *stack = stack_for(refs, refname, &refname);
	struct write_reflog_delete_arg arg = {
		.stack = stack,
		.refname = refname,
	};
	int ret;

	ret = reftable_stack_reload(stack);
	if (ret)
		return ret;
	ret = reftable_stack_add(stack, &write_reflog_delete_table, &arg);

	assert(ret != REFTABLE_API_ERROR);
	return ret;
}
struct reflog_expiry_arg {
	struct reftable_stack *stack;
	struct reftable_log_record *records;
	struct object_id update_oid;
	const char *refname;
	size_t len;
};

static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
{
	struct reflog_expiry_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	uint64_t live_records = 0;
	size_t i;
	int ret;

	for (i = 0; i < arg->len; i++)
		if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
			live_records++;

	reftable_writer_set_limits(writer, ts, ts);

	if (!is_null_oid(&arg->update_oid)) {
		struct reftable_ref_record ref = {0};
		struct object_id peeled;

		ref.refname = (char *)arg->refname;
		ref.update_index = ts;

		if (!peel_object(&arg->update_oid, &peeled)) {
			ref.value_type = REFTABLE_REF_VAL2;
			memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
			memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
		} else {
			ref.value_type = REFTABLE_REF_VAL1;
			memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
		}

		ret = reftable_writer_add_ref(writer, &ref);
		if (ret < 0)
			return ret;
	}

	/*
	 * When there are no more entries left in the reflog we empty it
	 * completely, but write a placeholder reflog entry that indicates that
	 * the reflog still exists.
	 */
	if (!live_records) {
		struct reftable_log_record log = {
			.refname = (char *)arg->refname,
			.value_type = REFTABLE_LOG_UPDATE,
			.update_index = ts,
		};

		ret = reftable_writer_add_log(writer, &log);
		if (ret)
			return ret;
	}

	for (i = 0; i < arg->len; i++) {
		ret = reftable_writer_add_log(writer, &arg->records[i]);
		if (ret)
			return ret;
	}

	return 0;
}
*ref_store
,
2072 const char *refname
,
2074 reflog_expiry_prepare_fn prepare_fn
,
2075 reflog_expiry_should_prune_fn should_prune_fn
,
2076 reflog_expiry_cleanup_fn cleanup_fn
,
2077 void *policy_cb_data
)
2080 * For log expiry, we write tombstones for every single reflog entry
2081 * that is to be expired. This means that the entries are still
2082 * retrievable by delving into the stack, and expiring entries
2083 * paradoxically takes extra memory. This memory is only reclaimed when
2084 * compacting the reftable stack.
2086 * It would be better if the refs backend supported an API that sets a
2087 * criterion for all refs, passing the criterion to pack_refs().
2089 * On the plus side, because we do the expiration per ref, we can easily
2090 * insert the reflog existence dummies.
2092 struct reftable_ref_store
*refs
=
2093 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "reflog_expire");
2094 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
2095 struct reftable_merged_table
*mt
= reftable_stack_merged_table(stack
);
2096 struct reftable_log_record
*logs
= NULL
;
2097 struct reftable_log_record
*rewritten
= NULL
;
2098 struct reftable_ref_record ref_record
= {0};
2099 struct reftable_iterator it
= {0};
2100 struct reftable_addition
*add
= NULL
;
2101 struct reflog_expiry_arg arg
= {0};
2102 struct object_id oid
= {0};
2103 uint8_t *last_hash
= NULL
;
2104 size_t logs_nr
= 0, logs_alloc
= 0, i
;
2110 ret
= reftable_stack_reload(stack
);
2114 ret
= reftable_merged_table_seek_log(mt
, &it
, refname
);
2118 ret
= reftable_stack_new_addition(&add
, stack
);
2122 ret
= reftable_stack_read_ref(stack
, refname
, &ref_record
);
2125 if (reftable_ref_record_val1(&ref_record
))
2126 oidread(&oid
, reftable_ref_record_val1(&ref_record
));
2127 prepare_fn(refname
, &oid
, policy_cb_data
);
2130 struct reftable_log_record log
= {0};
2131 struct object_id old_oid
, new_oid
;
2133 ret
= reftable_iterator_next_log(&it
, &log
);
2136 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
2137 reftable_log_record_release(&log
);
2141 oidread(&old_oid
, log
.value
.update
.old_hash
);
2142 oidread(&new_oid
, log
.value
.update
.new_hash
);
2145 * Skip over the reflog existence marker. We will add it back
2146 * in when there are no live reflog records.
2148 if (is_null_oid(&old_oid
) && is_null_oid(&new_oid
)) {
2149 reftable_log_record_release(&log
);
2153 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
2154 logs
[logs_nr
++] = log
;
2158 * We need to rewrite all reflog entries according to the pruning
2159 * callback function:
2161 * - If a reflog entry shall be pruned we mark the record for
2164 * - Otherwise we may have to rewrite the chain of reflog entries so
2165 * that gaps created by just-deleted records get backfilled.
2167 CALLOC_ARRAY(rewritten
, logs_nr
);
2168 for (i
= logs_nr
; i
--;) {
2169 struct reftable_log_record
*dest
= &rewritten
[i
];
2170 struct object_id old_oid
, new_oid
;
2173 oidread(&old_oid
, logs
[i
].value
.update
.old_hash
);
2174 oidread(&new_oid
, logs
[i
].value
.update
.new_hash
);
2176 if (should_prune_fn(&old_oid
, &new_oid
, logs
[i
].value
.update
.email
,
2177 (timestamp_t
)logs
[i
].value
.update
.time
,
2178 logs
[i
].value
.update
.tz_offset
,
2179 logs
[i
].value
.update
.message
,
2181 dest
->value_type
= REFTABLE_LOG_DELETION
;
2183 if ((flags
& EXPIRE_REFLOGS_REWRITE
) && last_hash
)
2184 dest
->value
.update
.old_hash
= last_hash
;
2185 last_hash
= logs
[i
].value
.update
.new_hash
;
2189 if (flags
& EXPIRE_REFLOGS_UPDATE_REF
&& last_hash
&&
2190 reftable_ref_record_val1(&ref_record
))
2191 oidread(&arg
.update_oid
, last_hash
);
2193 arg
.records
= rewritten
;
2196 arg
.refname
= refname
,
2198 ret
= reftable_addition_add(add
, &write_reflog_expiry_table
, &arg
);
2203 * Future improvement: we could skip writing records that were
2206 if (!(flags
& EXPIRE_REFLOGS_DRY_RUN
))
2207 ret
= reftable_addition_commit(add
);
2211 cleanup_fn(policy_cb_data
);
2212 assert(ret
!= REFTABLE_API_ERROR
);
2214 reftable_ref_record_release(&ref_record
);
2215 reftable_iterator_destroy(&it
);
2216 reftable_addition_destroy(add
);
2217 for (i
= 0; i
< logs_nr
; i
++)
2218 reftable_log_record_release(&logs
[i
]);
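
/*
 * Example for the rewriting above: given the entries A->B, B->C and
 * C->D, pruning B->C with EXPIRE_REFLOGS_REWRITE set rewrites the
 * newer surviving entry C->D into B->D, so that consecutive reflog
 * entries keep forming an unbroken chain of old/new object IDs.
 */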
struct ref_storage_be refs_be_reftable = {
	.name = "reftable",
	.init = reftable_be_init,
	.init_db = reftable_be_init_db,
	.transaction_prepare = reftable_be_transaction_prepare,
	.transaction_finish = reftable_be_transaction_finish,
	.transaction_abort = reftable_be_transaction_abort,
	.initial_transaction_commit = reftable_be_initial_transaction_commit,

	.pack_refs = reftable_be_pack_refs,
	.create_symref = reftable_be_create_symref,
	.rename_ref = reftable_be_rename_ref,
	.copy_ref = reftable_be_copy_ref,

	.iterator_begin = reftable_be_iterator_begin,
	.read_raw_ref = reftable_be_read_raw_ref,
	.read_symbolic_ref = reftable_be_read_symbolic_ref,

	.reflog_iterator_begin = reftable_be_reflog_iterator_begin,
	.for_each_reflog_ent = reftable_be_for_each_reflog_ent,
	.for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
	.reflog_exists = reftable_be_reflog_exists,
	.create_reflog = reftable_be_create_reflog,
	.delete_reflog = reftable_be_delete_reflog,
	.reflog_expire = reftable_be_reflog_expire,
};
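
/*
 * This backend is selected via the repository's ref storage format,
 * e.g. by `git init --ref-format=reftable`, which records
 * `extensions.refstorage = reftable` in the repository config.
 */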