1 #include "../git-compat-util.h"
2 #include "../abspath.h"
3 #include "../chdir-notify.h"
5 #include "../environment.h"
6 #include "../gettext.h"
9 #include "../iterator.h"
11 #include "../lockfile.h"
12 #include "../object.h"
15 #include "../reftable/reftable-stack.h"
16 #include "../reftable/reftable-record.h"
17 #include "../reftable/reftable-error.h"
18 #include "../reftable/reftable-iterator.h"
20 #include "../strmap.h"
22 #include "refs-internal.h"
/*
 * Used as a flag in ref_update::flags when the ref_update was via an
 * update to HEAD.
 */
#define REF_UPDATE_VIA_HEAD (1 << 8)
30 struct reftable_ref_store
{
31 struct ref_store base
;
34 * The main stack refers to the common dir and thus contains common
35 * refs as well as refs of the main repository.
37 struct reftable_stack
*main_stack
;
39 * The worktree stack refers to the gitdir in case the refdb is opened
40 * via a worktree. It thus contains the per-worktree refs.
42 struct reftable_stack
*worktree_stack
;
44 * Map of worktree stacks by their respective worktree names. The map
45 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
47 struct strmap worktree_stacks
;
48 struct reftable_write_options write_options
;
50 unsigned int store_flags
;
55 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
56 * reftable_ref_store. required_flags is compared with ref_store's store_flags
57 * to ensure the ref_store has all required capabilities. "caller" is used in
58 * any necessary error messages.
60 static struct reftable_ref_store
*reftable_be_downcast(struct ref_store
*ref_store
,
61 unsigned int required_flags
,
64 struct reftable_ref_store
*refs
;
66 if (ref_store
->be
!= &refs_be_reftable
)
67 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
68 ref_store
->be
->name
, caller
);
70 refs
= (struct reftable_ref_store
*)ref_store
;
72 if ((refs
->store_flags
& required_flags
) != required_flags
)
73 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
74 caller
, required_flags
, refs
->store_flags
);
80 * Some refs are global to the repository (refs/heads/{*}), while others are
81 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
82 * multiple separate databases (ie. multiple reftable/ directories), one for
83 * the shared refs, one for the current worktree refs, and one for each
84 * additional worktree. For reading, we merge the view of both the shared and
85 * the current worktree's refs, when necessary.
87 * This function also optionally assigns the rewritten reference name that is
88 * local to the stack. This translation is required when using worktree refs
89 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
90 * those references in their normalized form.
92 static struct reftable_stack
*stack_for(struct reftable_ref_store
*store
,
94 const char **rewritten_ref
)
100 return store
->main_stack
;
102 switch (parse_worktree_ref(refname
, &wtname
, &wtname_len
, rewritten_ref
)) {
103 case REF_WORKTREE_OTHER
: {
104 static struct strbuf wtname_buf
= STRBUF_INIT
;
105 struct strbuf wt_dir
= STRBUF_INIT
;
106 struct reftable_stack
*stack
;
109 * We're using a static buffer here so that we don't need to
110 * allocate the worktree name whenever we look up a reference.
111 * This could be avoided if the strmap interface knew how to
112 * handle keys with a length.
114 strbuf_reset(&wtname_buf
);
115 strbuf_add(&wtname_buf
, wtname
, wtname_len
);
118 * There is an edge case here: when the worktree references the
119 * current worktree, then we set up the stack once via
120 * `worktree_stacks` and once via `worktree_stack`. This is
121 * wasteful, but in the reading case it shouldn't matter. And
122 * in the writing case we would notice that the stack is locked
123 * already and error out when trying to write a reference via
126 stack
= strmap_get(&store
->worktree_stacks
, wtname_buf
.buf
);
128 strbuf_addf(&wt_dir
, "%s/worktrees/%s/reftable",
129 store
->base
.repo
->commondir
, wtname_buf
.buf
);
131 store
->err
= reftable_new_stack(&stack
, wt_dir
.buf
,
132 &store
->write_options
);
133 assert(store
->err
!= REFTABLE_API_ERROR
);
134 strmap_put(&store
->worktree_stacks
, wtname_buf
.buf
, stack
);
137 strbuf_release(&wt_dir
);
140 case REF_WORKTREE_CURRENT
:
142 * If there is no worktree stack then we're currently in the
143 * main worktree. We thus return the main stack in that case.
145 if (!store
->worktree_stack
)
146 return store
->main_stack
;
147 return store
->worktree_stack
;
148 case REF_WORKTREE_MAIN
:
149 case REF_WORKTREE_SHARED
:
150 return store
->main_stack
;
152 BUG("unhandled worktree reference type");
156 static int should_write_log(struct ref_store
*refs
, const char *refname
)
158 if (log_all_ref_updates
== LOG_REFS_UNSET
)
159 log_all_ref_updates
= is_bare_repository() ? LOG_REFS_NONE
: LOG_REFS_NORMAL
;
161 switch (log_all_ref_updates
) {
163 return refs_reflog_exists(refs
, refname
);
164 case LOG_REFS_ALWAYS
:
166 case LOG_REFS_NORMAL
:
167 if (should_autocreate_reflog(refname
))
169 return refs_reflog_exists(refs
, refname
);
171 BUG("unhandled core.logAllRefUpdates value %d", log_all_ref_updates
);
175 static void fill_reftable_log_record(struct reftable_log_record
*log
, const struct ident_split
*split
)
177 const char *tz_begin
;
180 reftable_log_record_release(log
);
181 log
->value_type
= REFTABLE_LOG_UPDATE
;
182 log
->value
.update
.name
=
183 xstrndup(split
->name_begin
, split
->name_end
- split
->name_begin
);
184 log
->value
.update
.email
=
185 xstrndup(split
->mail_begin
, split
->mail_end
- split
->mail_begin
);
186 log
->value
.update
.time
= atol(split
->date_begin
);
188 tz_begin
= split
->tz_begin
;
189 if (*tz_begin
== '-') {
193 if (*tz_begin
== '+') {
198 log
->value
.update
.tz_offset
= sign
* atoi(tz_begin
);
201 static int read_ref_without_reload(struct reftable_stack
*stack
,
203 struct object_id
*oid
,
204 struct strbuf
*referent
,
207 struct reftable_ref_record ref
= {0};
210 ret
= reftable_stack_read_ref(stack
, refname
, &ref
);
214 if (ref
.value_type
== REFTABLE_REF_SYMREF
) {
215 strbuf_reset(referent
);
216 strbuf_addstr(referent
, ref
.value
.symref
);
217 *type
|= REF_ISSYMREF
;
218 } else if (reftable_ref_record_val1(&ref
)) {
219 oidread(oid
, reftable_ref_record_val1(&ref
));
221 /* We got a tombstone, which should not happen. */
222 BUG("unhandled reference value type %d", ref
.value_type
);
226 assert(ret
!= REFTABLE_API_ERROR
);
227 reftable_ref_record_release(&ref
);
231 static int reftable_be_config(const char *var
, const char *value
,
232 const struct config_context
*ctx
,
235 struct reftable_write_options
*opts
= _opts
;
237 if (!strcmp(var
, "reftable.blocksize")) {
238 unsigned long block_size
= git_config_ulong(var
, value
, ctx
->kvi
);
239 if (block_size
> 16777215)
240 die("reftable block size cannot exceed 16MB");
241 opts
->block_size
= block_size
;
242 } else if (!strcmp(var
, "reftable.restartinterval")) {
243 unsigned long restart_interval
= git_config_ulong(var
, value
, ctx
->kvi
);
244 if (restart_interval
> UINT16_MAX
)
245 die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX
);
246 opts
->restart_interval
= restart_interval
;
247 } else if (!strcmp(var
, "reftable.indexobjects")) {
248 opts
->skip_index_objects
= !git_config_bool(var
, value
);
249 } else if (!strcmp(var
, "reftable.geometricfactor")) {
250 unsigned long factor
= git_config_ulong(var
, value
, ctx
->kvi
);
251 if (factor
> UINT8_MAX
)
252 die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX
);
253 opts
->auto_compaction_factor
= factor
;
259 static struct ref_store
*reftable_be_init(struct repository
*repo
,
261 unsigned int store_flags
)
263 struct reftable_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
264 struct strbuf path
= STRBUF_INIT
;
271 base_ref_store_init(&refs
->base
, repo
, gitdir
, &refs_be_reftable
);
272 strmap_init(&refs
->worktree_stacks
);
273 refs
->store_flags
= store_flags
;
275 refs
->write_options
.hash_id
= repo
->hash_algo
->format_id
;
276 refs
->write_options
.default_permissions
= calc_shared_perm(0666 & ~mask
);
277 refs
->write_options
.disable_auto_compact
=
278 !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
280 git_config(reftable_be_config
, &refs
->write_options
);
283 * It is somewhat unfortunate that we have to mirror the default block
284 * size of the reftable library here. But given that the write options
285 * wouldn't be updated by the library here, and given that we require
286 * the proper block size to trim reflog message so that they fit, we
287 * must set up a proper value here.
289 if (!refs
->write_options
.block_size
)
290 refs
->write_options
.block_size
= 4096;
293 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
294 * This stack contains both the shared and the main worktree refs.
296 * Note that we don't try to resolve the path in case we have a
297 * worktree because `get_common_dir_noenv()` already does it for us.
299 is_worktree
= get_common_dir_noenv(&path
, gitdir
);
302 strbuf_realpath(&path
, gitdir
, 0);
304 strbuf_addstr(&path
, "/reftable");
305 refs
->err
= reftable_new_stack(&refs
->main_stack
, path
.buf
,
306 &refs
->write_options
);
311 * If we're in a worktree we also need to set up the worktree reftable
312 * stack that is contained in the per-worktree GIT_DIR.
314 * Ideally, we would also add the stack to our worktree stack map. But
315 * we have no way to figure out the worktree name here and thus can't
320 strbuf_addf(&path
, "%s/reftable", gitdir
);
322 refs
->err
= reftable_new_stack(&refs
->worktree_stack
, path
.buf
,
323 &refs
->write_options
);
328 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs
->base
.gitdir
);
331 assert(refs
->err
!= REFTABLE_API_ERROR
);
332 strbuf_release(&path
);
336 static void reftable_be_release(struct ref_store
*ref_store
)
338 struct reftable_ref_store
*refs
= reftable_be_downcast(ref_store
, 0, "release");
339 struct strmap_entry
*entry
;
340 struct hashmap_iter iter
;
342 if (refs
->main_stack
) {
343 reftable_stack_destroy(refs
->main_stack
);
344 refs
->main_stack
= NULL
;
347 if (refs
->worktree_stack
) {
348 reftable_stack_destroy(refs
->worktree_stack
);
349 refs
->worktree_stack
= NULL
;
352 strmap_for_each_entry(&refs
->worktree_stacks
, &iter
, entry
)
353 reftable_stack_destroy(entry
->value
);
354 strmap_clear(&refs
->worktree_stacks
, 0);
357 static int reftable_be_create_on_disk(struct ref_store
*ref_store
,
359 struct strbuf
*err UNUSED
)
361 struct reftable_ref_store
*refs
=
362 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "create");
363 struct strbuf sb
= STRBUF_INIT
;
365 strbuf_addf(&sb
, "%s/reftable", refs
->base
.gitdir
);
366 safe_create_dir(sb
.buf
, 1);
369 strbuf_addf(&sb
, "%s/HEAD", refs
->base
.gitdir
);
370 write_file(sb
.buf
, "ref: refs/heads/.invalid");
371 adjust_shared_perm(sb
.buf
);
374 strbuf_addf(&sb
, "%s/refs", refs
->base
.gitdir
);
375 safe_create_dir(sb
.buf
, 1);
378 strbuf_addf(&sb
, "%s/refs/heads", refs
->base
.gitdir
);
379 write_file(sb
.buf
, "this repository uses the reftable format");
380 adjust_shared_perm(sb
.buf
);
386 struct reftable_ref_iterator
{
387 struct ref_iterator base
;
388 struct reftable_ref_store
*refs
;
389 struct reftable_iterator iter
;
390 struct reftable_ref_record ref
;
391 struct object_id oid
;
399 static int reftable_ref_iterator_advance(struct ref_iterator
*ref_iterator
)
401 struct reftable_ref_iterator
*iter
=
402 (struct reftable_ref_iterator
*)ref_iterator
;
403 struct reftable_ref_store
*refs
= iter
->refs
;
408 iter
->err
= reftable_iterator_next_ref(&iter
->iter
, &iter
->ref
);
413 * The files backend only lists references contained in "refs/" unless
414 * the root refs are to be included. We emulate the same behaviour here.
416 if (!starts_with(iter
->ref
.refname
, "refs/") &&
417 !(iter
->flags
& DO_FOR_EACH_INCLUDE_ROOT_REFS
&&
418 is_root_ref(iter
->ref
.refname
))) {
422 if (iter
->prefix_len
&&
423 strncmp(iter
->prefix
, iter
->ref
.refname
, iter
->prefix_len
)) {
428 if (iter
->flags
& DO_FOR_EACH_PER_WORKTREE_ONLY
&&
429 parse_worktree_ref(iter
->ref
.refname
, NULL
, NULL
, NULL
) !=
430 REF_WORKTREE_CURRENT
)
433 switch (iter
->ref
.value_type
) {
434 case REFTABLE_REF_VAL1
:
435 oidread(&iter
->oid
, iter
->ref
.value
.val1
);
437 case REFTABLE_REF_VAL2
:
438 oidread(&iter
->oid
, iter
->ref
.value
.val2
.value
);
440 case REFTABLE_REF_SYMREF
:
441 if (!refs_resolve_ref_unsafe(&iter
->refs
->base
, iter
->ref
.refname
,
442 RESOLVE_REF_READING
, &iter
->oid
, &flags
))
446 BUG("unhandled reference value type %d", iter
->ref
.value_type
);
449 if (is_null_oid(&iter
->oid
))
450 flags
|= REF_ISBROKEN
;
452 if (check_refname_format(iter
->ref
.refname
, REFNAME_ALLOW_ONELEVEL
)) {
453 if (!refname_is_safe(iter
->ref
.refname
))
454 die(_("refname is dangerous: %s"), iter
->ref
.refname
);
456 flags
|= REF_BAD_NAME
| REF_ISBROKEN
;
459 if (iter
->flags
& DO_FOR_EACH_OMIT_DANGLING_SYMREFS
&&
460 flags
& REF_ISSYMREF
&&
461 flags
& REF_ISBROKEN
)
464 if (!(iter
->flags
& DO_FOR_EACH_INCLUDE_BROKEN
) &&
465 !ref_resolves_to_object(iter
->ref
.refname
, refs
->base
.repo
,
469 iter
->base
.refname
= iter
->ref
.refname
;
470 iter
->base
.oid
= &iter
->oid
;
471 iter
->base
.flags
= flags
;
477 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
483 ref_iterator_abort(ref_iterator
);
490 static int reftable_ref_iterator_peel(struct ref_iterator
*ref_iterator
,
491 struct object_id
*peeled
)
493 struct reftable_ref_iterator
*iter
=
494 (struct reftable_ref_iterator
*)ref_iterator
;
496 if (iter
->ref
.value_type
== REFTABLE_REF_VAL2
) {
497 oidread(peeled
, iter
->ref
.value
.val2
.target_value
);
504 static int reftable_ref_iterator_abort(struct ref_iterator
*ref_iterator
)
506 struct reftable_ref_iterator
*iter
=
507 (struct reftable_ref_iterator
*)ref_iterator
;
508 reftable_ref_record_release(&iter
->ref
);
509 reftable_iterator_destroy(&iter
->iter
);
514 static struct ref_iterator_vtable reftable_ref_iterator_vtable
= {
515 .advance
= reftable_ref_iterator_advance
,
516 .peel
= reftable_ref_iterator_peel
,
517 .abort
= reftable_ref_iterator_abort
520 static struct reftable_ref_iterator
*ref_iterator_for_stack(struct reftable_ref_store
*refs
,
521 struct reftable_stack
*stack
,
525 struct reftable_ref_iterator
*iter
;
528 iter
= xcalloc(1, sizeof(*iter
));
529 base_ref_iterator_init(&iter
->base
, &reftable_ref_iterator_vtable
);
530 iter
->prefix
= prefix
;
531 iter
->prefix_len
= prefix
? strlen(prefix
) : 0;
532 iter
->base
.oid
= &iter
->oid
;
540 ret
= reftable_stack_reload(stack
);
544 reftable_stack_init_ref_iterator(stack
, &iter
->iter
);
545 ret
= reftable_iterator_seek_ref(&iter
->iter
, prefix
);
554 static struct ref_iterator
*reftable_be_iterator_begin(struct ref_store
*ref_store
,
556 const char **exclude_patterns
,
559 struct reftable_ref_iterator
*main_iter
, *worktree_iter
;
560 struct reftable_ref_store
*refs
;
561 unsigned int required_flags
= REF_STORE_READ
;
563 if (!(flags
& DO_FOR_EACH_INCLUDE_BROKEN
))
564 required_flags
|= REF_STORE_ODB
;
565 refs
= reftable_be_downcast(ref_store
, required_flags
, "ref_iterator_begin");
567 main_iter
= ref_iterator_for_stack(refs
, refs
->main_stack
, prefix
, flags
);
570 * The worktree stack is only set when we're in an actual worktree
571 * right now. If we aren't, then we return the common reftable
574 if (!refs
->worktree_stack
)
575 return &main_iter
->base
;
578 * Otherwise we merge both the common and the per-worktree refs into a
581 worktree_iter
= ref_iterator_for_stack(refs
, refs
->worktree_stack
, prefix
, flags
);
582 return merge_ref_iterator_begin(&worktree_iter
->base
, &main_iter
->base
,
583 ref_iterator_select
, NULL
);
586 static int reftable_be_read_raw_ref(struct ref_store
*ref_store
,
588 struct object_id
*oid
,
589 struct strbuf
*referent
,
593 struct reftable_ref_store
*refs
=
594 reftable_be_downcast(ref_store
, REF_STORE_READ
, "read_raw_ref");
595 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
601 ret
= reftable_stack_reload(stack
);
605 ret
= read_ref_without_reload(stack
, refname
, oid
, referent
, type
);
609 *failure_errno
= ENOENT
;
616 static int reftable_be_read_symbolic_ref(struct ref_store
*ref_store
,
618 struct strbuf
*referent
)
620 struct reftable_ref_store
*refs
=
621 reftable_be_downcast(ref_store
, REF_STORE_READ
, "read_symbolic_ref");
622 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
623 struct reftable_ref_record ref
= {0};
626 ret
= reftable_stack_reload(stack
);
630 ret
= reftable_stack_read_ref(stack
, refname
, &ref
);
631 if (ret
== 0 && ref
.value_type
== REFTABLE_REF_SYMREF
)
632 strbuf_addstr(referent
, ref
.value
.symref
);
636 reftable_ref_record_release(&ref
);
640 struct reftable_transaction_update
{
641 struct ref_update
*update
;
642 struct object_id current_oid
;
/* Per-stack state of a transaction: the locked addition plus its updates. */
struct write_transaction_table_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct reftable_addition *addition;
	struct reftable_transaction_update *updates;
	size_t updates_nr;
	size_t updates_alloc;
	/* Count from the preprocessing pass, used to preallocate `updates`. */
	size_t updates_expected;
};
/* Backend data of a ref transaction: one entry per touched stack. */
struct reftable_transaction_data {
	struct write_transaction_table_arg *args;
	size_t args_nr, args_alloc;
};
660 static void free_transaction_data(struct reftable_transaction_data
*tx_data
)
664 for (size_t i
= 0; i
< tx_data
->args_nr
; i
++) {
665 reftable_addition_destroy(tx_data
->args
[i
].addition
);
666 free(tx_data
->args
[i
].updates
);
673 * Prepare transaction update for the given reference update. This will cause
674 * us to lock the corresponding reftable stack for concurrent modification.
676 static int prepare_transaction_update(struct write_transaction_table_arg
**out
,
677 struct reftable_ref_store
*refs
,
678 struct reftable_transaction_data
*tx_data
,
679 struct ref_update
*update
,
682 struct reftable_stack
*stack
= stack_for(refs
, update
->refname
, NULL
);
683 struct write_transaction_table_arg
*arg
= NULL
;
688 * Search for a preexisting stack update. If there is one then we add
689 * the update to it, otherwise we set up a new stack update.
691 for (i
= 0; !arg
&& i
< tx_data
->args_nr
; i
++)
692 if (tx_data
->args
[i
].stack
== stack
)
693 arg
= &tx_data
->args
[i
];
696 struct reftable_addition
*addition
;
698 ret
= reftable_stack_reload(stack
);
702 ret
= reftable_stack_new_addition(&addition
, stack
);
704 if (ret
== REFTABLE_LOCK_ERROR
)
705 strbuf_addstr(err
, "cannot lock references");
709 ALLOC_GROW(tx_data
->args
, tx_data
->args_nr
+ 1,
710 tx_data
->args_alloc
);
711 arg
= &tx_data
->args
[tx_data
->args_nr
++];
714 arg
->addition
= addition
;
717 arg
->updates_alloc
= 0;
718 arg
->updates_expected
= 0;
721 arg
->updates_expected
++;
730 * Queue a reference update for the correct stack. We potentially need to
731 * handle multiple stack updates in a single transaction when it spans across
732 * multiple worktrees.
734 static int queue_transaction_update(struct reftable_ref_store
*refs
,
735 struct reftable_transaction_data
*tx_data
,
736 struct ref_update
*update
,
737 struct object_id
*current_oid
,
740 struct write_transaction_table_arg
*arg
= NULL
;
743 if (update
->backend_data
)
744 BUG("reference update queued more than once");
746 ret
= prepare_transaction_update(&arg
, refs
, tx_data
, update
, err
);
750 ALLOC_GROW(arg
->updates
, arg
->updates_nr
+ 1,
752 arg
->updates
[arg
->updates_nr
].update
= update
;
753 oidcpy(&arg
->updates
[arg
->updates_nr
].current_oid
, current_oid
);
754 update
->backend_data
= &arg
->updates
[arg
->updates_nr
++];
759 static int reftable_be_transaction_prepare(struct ref_store
*ref_store
,
760 struct ref_transaction
*transaction
,
763 struct reftable_ref_store
*refs
=
764 reftable_be_downcast(ref_store
, REF_STORE_WRITE
|REF_STORE_MAIN
, "ref_transaction_prepare");
765 struct strbuf referent
= STRBUF_INIT
, head_referent
= STRBUF_INIT
;
766 struct string_list affected_refnames
= STRING_LIST_INIT_NODUP
;
767 struct reftable_transaction_data
*tx_data
= NULL
;
768 struct object_id head_oid
;
769 unsigned int head_type
= 0;
777 tx_data
= xcalloc(1, sizeof(*tx_data
));
780 * Preprocess all updates. For one we check that there are no duplicate
781 * reference updates in this transaction. Second, we lock all stacks
782 * that will be modified during the transaction.
784 for (i
= 0; i
< transaction
->nr
; i
++) {
785 ret
= prepare_transaction_update(NULL
, refs
, tx_data
,
786 transaction
->updates
[i
], err
);
790 string_list_append(&affected_refnames
,
791 transaction
->updates
[i
]->refname
);
795 * Now that we have counted updates per stack we can preallocate their
796 * arrays. This avoids having to reallocate many times.
798 for (i
= 0; i
< tx_data
->args_nr
; i
++) {
799 CALLOC_ARRAY(tx_data
->args
[i
].updates
, tx_data
->args
[i
].updates_expected
);
800 tx_data
->args
[i
].updates_alloc
= tx_data
->args
[i
].updates_expected
;
804 * Fail if a refname appears more than once in the transaction.
805 * This code is taken from the files backend and is a good candidate to
806 * be moved into the generic layer.
808 string_list_sort(&affected_refnames
);
809 if (ref_update_reject_duplicates(&affected_refnames
, err
)) {
810 ret
= TRANSACTION_GENERIC_ERROR
;
814 ret
= read_ref_without_reload(stack_for(refs
, "HEAD", NULL
), "HEAD", &head_oid
,
815 &head_referent
, &head_type
);
820 for (i
= 0; i
< transaction
->nr
; i
++) {
821 struct ref_update
*u
= transaction
->updates
[i
];
822 struct object_id current_oid
= {0};
823 struct reftable_stack
*stack
;
824 const char *rewritten_ref
;
826 stack
= stack_for(refs
, u
->refname
, &rewritten_ref
);
828 /* Verify that the new object ID is valid. */
829 if ((u
->flags
& REF_HAVE_NEW
) && !is_null_oid(&u
->new_oid
) &&
830 !(u
->flags
& REF_SKIP_OID_VERIFICATION
) &&
831 !(u
->flags
& REF_LOG_ONLY
)) {
832 struct object
*o
= parse_object(refs
->base
.repo
, &u
->new_oid
);
835 _("trying to write ref '%s' with nonexistent object %s"),
836 u
->refname
, oid_to_hex(&u
->new_oid
));
841 if (o
->type
!= OBJ_COMMIT
&& is_branch(u
->refname
)) {
842 strbuf_addf(err
, _("trying to write non-commit object %s to branch '%s'"),
843 oid_to_hex(&u
->new_oid
), u
->refname
);
850 * When we update the reference that HEAD points to we enqueue
851 * a second log-only update for HEAD so that its reflog is
852 * updated accordingly.
854 if (head_type
== REF_ISSYMREF
&&
855 !(u
->flags
& REF_LOG_ONLY
) &&
856 !(u
->flags
& REF_UPDATE_VIA_HEAD
) &&
857 !strcmp(rewritten_ref
, head_referent
.buf
)) {
858 struct ref_update
*new_update
;
861 * First make sure that HEAD is not already in the
862 * transaction. This check is O(lg N) in the transaction
863 * size, but it happens at most once per transaction.
865 if (string_list_has_string(&affected_refnames
, "HEAD")) {
866 /* An entry already existed */
868 _("multiple updates for 'HEAD' (including one "
869 "via its referent '%s') are not allowed"),
871 ret
= TRANSACTION_NAME_CONFLICT
;
875 new_update
= ref_transaction_add_update(
877 u
->flags
| REF_LOG_ONLY
| REF_NO_DEREF
,
878 &u
->new_oid
, &u
->old_oid
, NULL
, NULL
, u
->msg
);
879 string_list_insert(&affected_refnames
, new_update
->refname
);
882 ret
= read_ref_without_reload(stack
, rewritten_ref
,
883 ¤t_oid
, &referent
, &u
->type
);
886 if (ret
> 0 && (!(u
->flags
& REF_HAVE_OLD
) || is_null_oid(&u
->old_oid
))) {
888 * The reference does not exist, and we either have no
889 * old object ID or expect the reference to not exist.
890 * We can thus skip below safety checks as well as the
891 * symref splitting. But we do want to verify that
892 * there is no conflicting reference here so that we
893 * can output a proper error message instead of failing
896 ret
= refs_verify_refname_available(ref_store
, u
->refname
,
897 &affected_refnames
, NULL
, err
);
902 * There is no need to write the reference deletion
903 * when the reference in question doesn't exist.
905 if ((u
->flags
& REF_HAVE_NEW
) && !ref_update_has_null_new_value(u
)) {
906 ret
= queue_transaction_update(refs
, tx_data
, u
,
915 /* The reference does not exist, but we expected it to. */
916 strbuf_addf(err
, _("cannot lock ref '%s': "
917 "unable to resolve reference '%s'"),
918 ref_update_original_update_refname(u
), u
->refname
);
923 if (u
->type
& REF_ISSYMREF
) {
925 * The reftable stack is locked at this point already,
926 * so it is safe to call `refs_resolve_ref_unsafe()`
927 * here without causing races.
929 const char *resolved
= refs_resolve_ref_unsafe(&refs
->base
, u
->refname
, 0,
932 if (u
->flags
& REF_NO_DEREF
) {
933 if (u
->flags
& REF_HAVE_OLD
&& !resolved
) {
934 strbuf_addf(err
, _("cannot lock ref '%s': "
935 "error reading reference"), u
->refname
);
940 struct ref_update
*new_update
;
943 new_flags
= u
->flags
;
944 if (!strcmp(rewritten_ref
, "HEAD"))
945 new_flags
|= REF_UPDATE_VIA_HEAD
;
948 * If we are updating a symref (eg. HEAD), we should also
949 * update the branch that the symref points to.
951 * This is generic functionality, and would be better
952 * done in refs.c, but the current implementation is
953 * intertwined with the locking in files-backend.c.
955 new_update
= ref_transaction_add_update(
956 transaction
, referent
.buf
, new_flags
,
957 &u
->new_oid
, &u
->old_oid
, u
->new_target
,
958 u
->old_target
, u
->msg
);
960 new_update
->parent_update
= u
;
963 * Change the symbolic ref update to log only. Also, it
964 * doesn't need to check its old OID value, as that will be
965 * done when new_update is processed.
967 u
->flags
|= REF_LOG_ONLY
| REF_NO_DEREF
;
968 u
->flags
&= ~REF_HAVE_OLD
;
970 if (string_list_has_string(&affected_refnames
, new_update
->refname
)) {
972 _("multiple updates for '%s' (including one "
973 "via symref '%s') are not allowed"),
974 referent
.buf
, u
->refname
);
975 ret
= TRANSACTION_NAME_CONFLICT
;
978 string_list_insert(&affected_refnames
, new_update
->refname
);
983 * Verify that the old object matches our expectations. Note
984 * that the error messages here do not make a lot of sense in
985 * the context of the reftable backend as we never lock
986 * individual refs. But the error messages match what the files
987 * backend returns, which keeps our tests happy.
990 if (ref_update_check_old_target(referent
.buf
, u
, err
)) {
994 } else if ((u
->flags
& REF_HAVE_OLD
) && !oideq(¤t_oid
, &u
->old_oid
)) {
995 if (is_null_oid(&u
->old_oid
))
996 strbuf_addf(err
, _("cannot lock ref '%s': "
997 "reference already exists"),
998 ref_update_original_update_refname(u
));
999 else if (is_null_oid(¤t_oid
))
1000 strbuf_addf(err
, _("cannot lock ref '%s': "
1001 "reference is missing but expected %s"),
1002 ref_update_original_update_refname(u
),
1003 oid_to_hex(&u
->old_oid
));
1005 strbuf_addf(err
, _("cannot lock ref '%s': "
1006 "is at %s but expected %s"),
1007 ref_update_original_update_refname(u
),
1008 oid_to_hex(¤t_oid
),
1009 oid_to_hex(&u
->old_oid
));
1015 * If all of the following conditions are true:
1017 * - We're not about to write a symref.
1018 * - We're not about to write a log-only entry.
1019 * - Old and new object ID are different.
1021 * Then we're essentially doing a no-op update that can be
1022 * skipped. This is not only for the sake of efficiency, but
1023 * also skips writing unneeded reflog entries.
1025 if ((u
->type
& REF_ISSYMREF
) ||
1026 (u
->flags
& REF_LOG_ONLY
) ||
1027 (u
->flags
& REF_HAVE_NEW
&& !oideq(¤t_oid
, &u
->new_oid
))) {
1028 ret
= queue_transaction_update(refs
, tx_data
, u
,
1035 transaction
->backend_data
= tx_data
;
1036 transaction
->state
= REF_TRANSACTION_PREPARED
;
1039 assert(ret
!= REFTABLE_API_ERROR
);
1041 free_transaction_data(tx_data
);
1042 transaction
->state
= REF_TRANSACTION_CLOSED
;
1044 strbuf_addf(err
, _("reftable: transaction prepare: %s"),
1045 reftable_error_str(ret
));
1047 string_list_clear(&affected_refnames
, 0);
1048 strbuf_release(&referent
);
1049 strbuf_release(&head_referent
);
1054 static int reftable_be_transaction_abort(struct ref_store
*ref_store
,
1055 struct ref_transaction
*transaction
,
1058 struct reftable_transaction_data
*tx_data
= transaction
->backend_data
;
1059 free_transaction_data(tx_data
);
1060 transaction
->state
= REF_TRANSACTION_CLOSED
;
1064 static int transaction_update_cmp(const void *a
, const void *b
)
1066 return strcmp(((struct reftable_transaction_update
*)a
)->update
->refname
,
1067 ((struct reftable_transaction_update
*)b
)->update
->refname
);
1070 static int write_transaction_table(struct reftable_writer
*writer
, void *cb_data
)
1072 struct write_transaction_table_arg
*arg
= cb_data
;
1073 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
1074 struct reftable_log_record
*logs
= NULL
;
1075 struct ident_split committer_ident
= {0};
1076 size_t logs_nr
= 0, logs_alloc
= 0, i
;
1077 const char *committer_info
;
1080 committer_info
= git_committer_info(0);
1081 if (split_ident_line(&committer_ident
, committer_info
, strlen(committer_info
)))
1082 BUG("failed splitting committer info");
1084 QSORT(arg
->updates
, arg
->updates_nr
, transaction_update_cmp
);
1086 reftable_writer_set_limits(writer
, ts
, ts
);
1088 for (i
= 0; i
< arg
->updates_nr
; i
++) {
1089 struct reftable_transaction_update
*tx_update
= &arg
->updates
[i
];
1090 struct ref_update
*u
= tx_update
->update
;
1093 * Write a reflog entry when updating a ref to point to
1094 * something new in either of the following cases:
1096 * - The reference is about to be deleted. We always want to
1097 * delete the reflog in that case.
1098 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1100 * - `core.logAllRefUpdates` tells us to create the reflog for
1103 if ((u
->flags
& REF_HAVE_NEW
) &&
1104 !(u
->type
& REF_ISSYMREF
) &&
1105 ref_update_has_null_new_value(u
)) {
1106 struct reftable_log_record log
= {0};
1107 struct reftable_iterator it
= {0};
1109 reftable_stack_init_log_iterator(arg
->stack
, &it
);
1112 * When deleting refs we also delete all reflog entries
1113 * with them. While it is not strictly required to
1114 * delete reflogs together with their refs, this
1115 * matches the behaviour of the files backend.
1117 * Unfortunately, we have no better way than to delete
1118 * all reflog entries one by one.
1120 ret
= reftable_iterator_seek_log(&it
, u
->refname
);
1122 struct reftable_log_record
*tombstone
;
1124 ret
= reftable_iterator_next_log(&it
, &log
);
1127 if (ret
> 0 || strcmp(log
.refname
, u
->refname
)) {
1132 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1133 tombstone
= &logs
[logs_nr
++];
1134 tombstone
->refname
= xstrdup(u
->refname
);
1135 tombstone
->value_type
= REFTABLE_LOG_DELETION
;
1136 tombstone
->update_index
= log
.update_index
;
1139 reftable_log_record_release(&log
);
1140 reftable_iterator_destroy(&it
);
1144 } else if (u
->flags
& REF_HAVE_NEW
&&
1145 (u
->flags
& REF_FORCE_CREATE_REFLOG
||
1146 should_write_log(&arg
->refs
->base
, u
->refname
))) {
1147 struct reftable_log_record
*log
;
1148 int create_reflog
= 1;
1150 if (u
->new_target
) {
1151 if (!refs_resolve_ref_unsafe(&arg
->refs
->base
, u
->new_target
,
1152 RESOLVE_REF_READING
, &u
->new_oid
, NULL
)) {
1154 * TODO: currently we skip creating reflogs for dangling
1155 * symref updates. It would be nice to capture this as
1156 * zero oid updates however.
1162 if (create_reflog
) {
1163 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1164 log
= &logs
[logs_nr
++];
1165 memset(log
, 0, sizeof(*log
));
1167 fill_reftable_log_record(log
, &committer_ident
);
1168 log
->update_index
= ts
;
1169 log
->refname
= xstrdup(u
->refname
);
1170 memcpy(log
->value
.update
.new_hash
,
1171 u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1172 memcpy(log
->value
.update
.old_hash
,
1173 tx_update
->current_oid
.hash
, GIT_MAX_RAWSZ
);
1174 log
->value
.update
.message
=
1175 xstrndup(u
->msg
, arg
->refs
->write_options
.block_size
/ 2);
1179 if (u
->flags
& REF_LOG_ONLY
)
1182 if (u
->new_target
) {
1183 struct reftable_ref_record ref
= {
1184 .refname
= (char *)u
->refname
,
1185 .value_type
= REFTABLE_REF_SYMREF
,
1186 .value
.symref
= (char *)u
->new_target
,
1190 ret
= reftable_writer_add_ref(writer
, &ref
);
1193 } else if ((u
->flags
& REF_HAVE_NEW
) && ref_update_has_null_new_value(u
)) {
1194 struct reftable_ref_record ref
= {
1195 .refname
= (char *)u
->refname
,
1197 .value_type
= REFTABLE_REF_DELETION
,
1200 ret
= reftable_writer_add_ref(writer
, &ref
);
1203 } else if (u
->flags
& REF_HAVE_NEW
) {
1204 struct reftable_ref_record ref
= {0};
1205 struct object_id peeled
;
1208 ref
.refname
= (char *)u
->refname
;
1209 ref
.update_index
= ts
;
1211 peel_error
= peel_object(arg
->refs
->base
.repo
, &u
->new_oid
, &peeled
);
1213 ref
.value_type
= REFTABLE_REF_VAL2
;
1214 memcpy(ref
.value
.val2
.target_value
, peeled
.hash
, GIT_MAX_RAWSZ
);
1215 memcpy(ref
.value
.val2
.value
, u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1216 } else if (!is_null_oid(&u
->new_oid
)) {
1217 ref
.value_type
= REFTABLE_REF_VAL1
;
1218 memcpy(ref
.value
.val1
, u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1221 ret
= reftable_writer_add_ref(writer
, &ref
);
1228 * Logs are written at the end so that we do not have intermixed ref
1232 ret
= reftable_writer_add_logs(writer
, logs
, logs_nr
);
1238 assert(ret
!= REFTABLE_API_ERROR
);
1239 for (i
= 0; i
< logs_nr
; i
++)
1240 reftable_log_record_release(&logs
[i
]);
1245 static int reftable_be_transaction_finish(struct ref_store
*ref_store
,
1246 struct ref_transaction
*transaction
,
1249 struct reftable_transaction_data
*tx_data
= transaction
->backend_data
;
1252 for (size_t i
= 0; i
< tx_data
->args_nr
; i
++) {
1253 ret
= reftable_addition_add(tx_data
->args
[i
].addition
,
1254 write_transaction_table
, &tx_data
->args
[i
]);
1258 ret
= reftable_addition_commit(tx_data
->args
[i
].addition
);
1264 assert(ret
!= REFTABLE_API_ERROR
);
1265 free_transaction_data(tx_data
);
1266 transaction
->state
= REF_TRANSACTION_CLOSED
;
1269 strbuf_addf(err
, _("reftable: transaction failure: %s"),
1270 reftable_error_str(ret
));
1276 static int reftable_be_initial_transaction_commit(struct ref_store
*ref_store UNUSED
,
1277 struct ref_transaction
*transaction
,
1280 return ref_transaction_commit(transaction
, err
);
1283 static int reftable_be_pack_refs(struct ref_store
*ref_store
,
1284 struct pack_refs_opts
*opts
)
1286 struct reftable_ref_store
*refs
=
1287 reftable_be_downcast(ref_store
, REF_STORE_WRITE
| REF_STORE_ODB
, "pack_refs");
1288 struct reftable_stack
*stack
;
1294 stack
= refs
->worktree_stack
;
1296 stack
= refs
->main_stack
;
1298 if (opts
->flags
& PACK_REFS_AUTO
)
1299 ret
= reftable_stack_auto_compact(stack
);
1301 ret
= reftable_stack_compact_all(stack
, NULL
);
1303 ret
= error(_("unable to compact stack: %s"),
1304 reftable_error_str(ret
));
1308 ret
= reftable_stack_clean(stack
);
/*
 * Callback payload for writing a symbolic ref.
 * NOTE(review): the fields past `refname` were dropped by extraction and
 * restored from upstream usage — confirm against the canonical file.
 */
struct write_create_symref_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct strbuf *err;
	const char *refname;
	const char *target;
	const char *logmsg;
};
/*
 * Callback payload for copying or renaming a ref. `delete_old` turns a
 * copy into a rename by additionally deleting the source ref.
 */
struct write_copy_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	const char *oldname;
	const char *newname;
	const char *logmsg;
	int delete_old;
};
1334 static int write_copy_table(struct reftable_writer
*writer
, void *cb_data
)
1336 struct write_copy_arg
*arg
= cb_data
;
1337 uint64_t deletion_ts
, creation_ts
;
1338 struct reftable_ref_record old_ref
= {0}, refs
[2] = {0};
1339 struct reftable_log_record old_log
= {0}, *logs
= NULL
;
1340 struct reftable_iterator it
= {0};
1341 struct string_list skip
= STRING_LIST_INIT_NODUP
;
1342 struct ident_split committer_ident
= {0};
1343 struct strbuf errbuf
= STRBUF_INIT
;
1344 size_t logs_nr
= 0, logs_alloc
= 0, i
;
1345 const char *committer_info
;
1348 committer_info
= git_committer_info(0);
1349 if (split_ident_line(&committer_ident
, committer_info
, strlen(committer_info
)))
1350 BUG("failed splitting committer info");
1352 if (reftable_stack_read_ref(arg
->stack
, arg
->oldname
, &old_ref
)) {
1353 ret
= error(_("refname %s not found"), arg
->oldname
);
1356 if (old_ref
.value_type
== REFTABLE_REF_SYMREF
) {
1357 ret
= error(_("refname %s is a symbolic ref, copying it is not supported"),
1363 * There's nothing to do in case the old and new name are the same, so
1364 * we exit early in that case.
1366 if (!strcmp(arg
->oldname
, arg
->newname
)) {
1372 * Verify that the new refname is available.
1374 if (arg
->delete_old
)
1375 string_list_insert(&skip
, arg
->oldname
);
1376 ret
= refs_verify_refname_available(&arg
->refs
->base
, arg
->newname
,
1377 NULL
, &skip
, &errbuf
);
1379 error("%s", errbuf
.buf
);
1384 * When deleting the old reference we have to use two update indices:
1385 * once to delete the old ref and its reflog, and once to create the
1386 * new ref and its reflog. They need to be staged with two separate
1387 * indices because the new reflog needs to encode both the deletion of
1388 * the old branch and the creation of the new branch, and we cannot do
1389 * two changes to a reflog in a single update.
1391 deletion_ts
= creation_ts
= reftable_stack_next_update_index(arg
->stack
);
1392 if (arg
->delete_old
)
1394 reftable_writer_set_limits(writer
, deletion_ts
, creation_ts
);
1397 * Add the new reference. If this is a rename then we also delete the
1401 refs
[0].refname
= (char *)arg
->newname
;
1402 refs
[0].update_index
= creation_ts
;
1403 if (arg
->delete_old
) {
1404 refs
[1].refname
= (char *)arg
->oldname
;
1405 refs
[1].value_type
= REFTABLE_REF_DELETION
;
1406 refs
[1].update_index
= deletion_ts
;
1408 ret
= reftable_writer_add_refs(writer
, refs
, arg
->delete_old
? 2 : 1);
1413 * When deleting the old branch we need to create a reflog entry on the
1414 * new branch name that indicates that the old branch has been deleted
1415 * and then recreated. This is a tad weird, but matches what the files
1418 if (arg
->delete_old
) {
1419 struct strbuf head_referent
= STRBUF_INIT
;
1420 struct object_id head_oid
;
1421 int append_head_reflog
;
1422 unsigned head_type
= 0;
1424 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1425 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1426 fill_reftable_log_record(&logs
[logs_nr
], &committer_ident
);
1427 logs
[logs_nr
].refname
= (char *)arg
->newname
;
1428 logs
[logs_nr
].update_index
= deletion_ts
;
1429 logs
[logs_nr
].value
.update
.message
=
1430 xstrndup(arg
->logmsg
, arg
->refs
->write_options
.block_size
/ 2);
1431 memcpy(logs
[logs_nr
].value
.update
.old_hash
, old_ref
.value
.val1
, GIT_MAX_RAWSZ
);
1434 ret
= read_ref_without_reload(arg
->stack
, "HEAD", &head_oid
, &head_referent
, &head_type
);
1437 append_head_reflog
= (head_type
& REF_ISSYMREF
) && !strcmp(head_referent
.buf
, arg
->oldname
);
1438 strbuf_release(&head_referent
);
1441 * The files backend uses `refs_delete_ref()` to delete the old
1442 * branch name, which will append a reflog entry for HEAD in
1443 * case it points to the old branch.
1445 if (append_head_reflog
) {
1446 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1447 logs
[logs_nr
] = logs
[logs_nr
- 1];
1448 logs
[logs_nr
].refname
= "HEAD";
1454 * Create the reflog entry for the newly created branch.
1456 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1457 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1458 fill_reftable_log_record(&logs
[logs_nr
], &committer_ident
);
1459 logs
[logs_nr
].refname
= (char *)arg
->newname
;
1460 logs
[logs_nr
].update_index
= creation_ts
;
1461 logs
[logs_nr
].value
.update
.message
=
1462 xstrndup(arg
->logmsg
, arg
->refs
->write_options
.block_size
/ 2);
1463 memcpy(logs
[logs_nr
].value
.update
.new_hash
, old_ref
.value
.val1
, GIT_MAX_RAWSZ
);
1467 * In addition to writing the reflog entry for the new branch, we also
1468 * copy over all log entries from the old reflog. Last but not least,
1469 * when renaming we also have to delete all the old reflog entries.
1471 reftable_stack_init_log_iterator(arg
->stack
, &it
);
1472 ret
= reftable_iterator_seek_log(&it
, arg
->oldname
);
1477 ret
= reftable_iterator_next_log(&it
, &old_log
);
1480 if (ret
> 0 || strcmp(old_log
.refname
, arg
->oldname
)) {
1485 free(old_log
.refname
);
1488 * Copy over the old reflog entry with the new refname.
1490 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1491 logs
[logs_nr
] = old_log
;
1492 logs
[logs_nr
].refname
= (char *)arg
->newname
;
1496 * Delete the old reflog entry in case we are renaming.
1498 if (arg
->delete_old
) {
1499 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1500 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1501 logs
[logs_nr
].refname
= (char *)arg
->oldname
;
1502 logs
[logs_nr
].value_type
= REFTABLE_LOG_DELETION
;
1503 logs
[logs_nr
].update_index
= old_log
.update_index
;
1508 * Transfer ownership of the log record we're iterating over to
1509 * the array of log records. Otherwise, the pointers would get
1510 * free'd or reallocated by the iterator.
1512 memset(&old_log
, 0, sizeof(old_log
));
1515 ret
= reftable_writer_add_logs(writer
, logs
, logs_nr
);
1520 assert(ret
!= REFTABLE_API_ERROR
);
1521 reftable_iterator_destroy(&it
);
1522 string_list_clear(&skip
, 0);
1523 strbuf_release(&errbuf
);
1524 for (i
= 0; i
< logs_nr
; i
++) {
1525 if (!strcmp(logs
[i
].refname
, "HEAD"))
1527 logs
[i
].refname
= NULL
;
1528 reftable_log_record_release(&logs
[i
]);
1531 reftable_ref_record_release(&old_ref
);
1532 reftable_log_record_release(&old_log
);
1536 static int reftable_be_rename_ref(struct ref_store
*ref_store
,
1537 const char *oldrefname
,
1538 const char *newrefname
,
1541 struct reftable_ref_store
*refs
=
1542 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "rename_ref");
1543 struct reftable_stack
*stack
= stack_for(refs
, newrefname
, &newrefname
);
1544 struct write_copy_arg arg
= {
1547 .oldname
= oldrefname
,
1548 .newname
= newrefname
,
1558 ret
= reftable_stack_reload(stack
);
1561 ret
= reftable_stack_add(stack
, &write_copy_table
, &arg
);
1564 assert(ret
!= REFTABLE_API_ERROR
);
1568 static int reftable_be_copy_ref(struct ref_store
*ref_store
,
1569 const char *oldrefname
,
1570 const char *newrefname
,
1573 struct reftable_ref_store
*refs
=
1574 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "copy_ref");
1575 struct reftable_stack
*stack
= stack_for(refs
, newrefname
, &newrefname
);
1576 struct write_copy_arg arg
= {
1579 .oldname
= oldrefname
,
1580 .newname
= newrefname
,
1589 ret
= reftable_stack_reload(stack
);
1592 ret
= reftable_stack_add(stack
, &write_copy_table
, &arg
);
1595 assert(ret
!= REFTABLE_API_ERROR
);
1599 struct reftable_reflog_iterator
{
1600 struct ref_iterator base
;
1601 struct reftable_ref_store
*refs
;
1602 struct reftable_iterator iter
;
1603 struct reftable_log_record log
;
1604 struct strbuf last_name
;
1608 static int reftable_reflog_iterator_advance(struct ref_iterator
*ref_iterator
)
1610 struct reftable_reflog_iterator
*iter
=
1611 (struct reftable_reflog_iterator
*)ref_iterator
;
1613 while (!iter
->err
) {
1614 iter
->err
= reftable_iterator_next_log(&iter
->iter
, &iter
->log
);
1619 * We want the refnames that we have reflogs for, so we skip if
1620 * we've already produced this name. This could be faster by
1621 * seeking directly to reflog@update_index==0.
1623 if (!strcmp(iter
->log
.refname
, iter
->last_name
.buf
))
1626 if (check_refname_format(iter
->log
.refname
,
1627 REFNAME_ALLOW_ONELEVEL
))
1630 strbuf_reset(&iter
->last_name
);
1631 strbuf_addstr(&iter
->last_name
, iter
->log
.refname
);
1632 iter
->base
.refname
= iter
->log
.refname
;
1637 if (iter
->err
> 0) {
1638 if (ref_iterator_abort(ref_iterator
) != ITER_DONE
)
1643 if (iter
->err
< 0) {
1644 ref_iterator_abort(ref_iterator
);
1651 static int reftable_reflog_iterator_peel(struct ref_iterator
*ref_iterator
,
1652 struct object_id
*peeled
)
1654 BUG("reftable reflog iterator cannot be peeled");
1658 static int reftable_reflog_iterator_abort(struct ref_iterator
*ref_iterator
)
1660 struct reftable_reflog_iterator
*iter
=
1661 (struct reftable_reflog_iterator
*)ref_iterator
;
1662 reftable_log_record_release(&iter
->log
);
1663 reftable_iterator_destroy(&iter
->iter
);
1664 strbuf_release(&iter
->last_name
);
1669 static struct ref_iterator_vtable reftable_reflog_iterator_vtable
= {
1670 .advance
= reftable_reflog_iterator_advance
,
1671 .peel
= reftable_reflog_iterator_peel
,
1672 .abort
= reftable_reflog_iterator_abort
1675 static struct reftable_reflog_iterator
*reflog_iterator_for_stack(struct reftable_ref_store
*refs
,
1676 struct reftable_stack
*stack
)
1678 struct reftable_reflog_iterator
*iter
;
1681 iter
= xcalloc(1, sizeof(*iter
));
1682 base_ref_iterator_init(&iter
->base
, &reftable_reflog_iterator_vtable
);
1683 strbuf_init(&iter
->last_name
, 0);
1690 ret
= reftable_stack_reload(stack
);
1694 reftable_stack_init_log_iterator(stack
, &iter
->iter
);
1695 ret
= reftable_iterator_seek_log(&iter
->iter
, "");
1704 static struct ref_iterator
*reftable_be_reflog_iterator_begin(struct ref_store
*ref_store
)
1706 struct reftable_ref_store
*refs
=
1707 reftable_be_downcast(ref_store
, REF_STORE_READ
, "reflog_iterator_begin");
1708 struct reftable_reflog_iterator
*main_iter
, *worktree_iter
;
1710 main_iter
= reflog_iterator_for_stack(refs
, refs
->main_stack
);
1711 if (!refs
->worktree_stack
)
1712 return &main_iter
->base
;
1714 worktree_iter
= reflog_iterator_for_stack(refs
, refs
->worktree_stack
);
1716 return merge_ref_iterator_begin(&worktree_iter
->base
, &main_iter
->base
,
1717 ref_iterator_select
, NULL
);
1720 static int yield_log_record(struct reftable_log_record
*log
,
1721 each_reflog_ent_fn fn
,
1724 struct object_id old_oid
, new_oid
;
1725 const char *full_committer
;
1727 oidread(&old_oid
, log
->value
.update
.old_hash
);
1728 oidread(&new_oid
, log
->value
.update
.new_hash
);
1731 * When both the old object ID and the new object ID are null
1732 * then this is the reflog existence marker. The caller must
1733 * not be aware of it.
1735 if (is_null_oid(&old_oid
) && is_null_oid(&new_oid
))
1738 full_committer
= fmt_ident(log
->value
.update
.name
, log
->value
.update
.email
,
1739 WANT_COMMITTER_IDENT
, NULL
, IDENT_NO_DATE
);
1740 return fn(&old_oid
, &new_oid
, full_committer
,
1741 log
->value
.update
.time
, log
->value
.update
.tz_offset
,
1742 log
->value
.update
.message
, cb_data
);
1745 static int reftable_be_for_each_reflog_ent_reverse(struct ref_store
*ref_store
,
1746 const char *refname
,
1747 each_reflog_ent_fn fn
,
1750 struct reftable_ref_store
*refs
=
1751 reftable_be_downcast(ref_store
, REF_STORE_READ
, "for_each_reflog_ent_reverse");
1752 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1753 struct reftable_log_record log
= {0};
1754 struct reftable_iterator it
= {0};
1760 reftable_stack_init_log_iterator(stack
, &it
);
1761 ret
= reftable_iterator_seek_log(&it
, refname
);
1763 ret
= reftable_iterator_next_log(&it
, &log
);
1766 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
1771 ret
= yield_log_record(&log
, fn
, cb_data
);
1776 reftable_log_record_release(&log
);
1777 reftable_iterator_destroy(&it
);
1781 static int reftable_be_for_each_reflog_ent(struct ref_store
*ref_store
,
1782 const char *refname
,
1783 each_reflog_ent_fn fn
,
1786 struct reftable_ref_store
*refs
=
1787 reftable_be_downcast(ref_store
, REF_STORE_READ
, "for_each_reflog_ent");
1788 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1789 struct reftable_log_record
*logs
= NULL
;
1790 struct reftable_iterator it
= {0};
1791 size_t logs_alloc
= 0, logs_nr
= 0, i
;
1797 reftable_stack_init_log_iterator(stack
, &it
);
1798 ret
= reftable_iterator_seek_log(&it
, refname
);
1800 struct reftable_log_record log
= {0};
1802 ret
= reftable_iterator_next_log(&it
, &log
);
1805 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
1806 reftable_log_record_release(&log
);
1811 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1812 logs
[logs_nr
++] = log
;
1815 for (i
= logs_nr
; i
--;) {
1816 ret
= yield_log_record(&logs
[i
], fn
, cb_data
);
1822 reftable_iterator_destroy(&it
);
1823 for (i
= 0; i
< logs_nr
; i
++)
1824 reftable_log_record_release(&logs
[i
]);
1829 static int reftable_be_reflog_exists(struct ref_store
*ref_store
,
1830 const char *refname
)
1832 struct reftable_ref_store
*refs
=
1833 reftable_be_downcast(ref_store
, REF_STORE_READ
, "reflog_exists");
1834 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1835 struct reftable_log_record log
= {0};
1836 struct reftable_iterator it
= {0};
1843 ret
= reftable_stack_reload(stack
);
1847 reftable_stack_init_log_iterator(stack
, &it
);
1848 ret
= reftable_iterator_seek_log(&it
, refname
);
1853 * Check whether we get at least one log record for the given ref name.
1854 * If so, the reflog exists, otherwise it doesn't.
1856 ret
= reftable_iterator_next_log(&it
, &log
);
1864 ret
= strcmp(log
.refname
, refname
) == 0;
1867 reftable_iterator_destroy(&it
);
1868 reftable_log_record_release(&log
);
/* Callback payload for writing a reflog "existence" placeholder entry. */
struct write_reflog_existence_arg {
	struct reftable_ref_store *refs;
	const char *refname;
	struct reftable_stack *stack;
};
1880 static int write_reflog_existence_table(struct reftable_writer
*writer
,
1883 struct write_reflog_existence_arg
*arg
= cb_data
;
1884 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
1885 struct reftable_log_record log
= {0};
1888 ret
= reftable_stack_read_log(arg
->stack
, arg
->refname
, &log
);
1892 reftable_writer_set_limits(writer
, ts
, ts
);
1895 * The existence entry has both old and new object ID set to the the
1896 * null object ID. Our iterators are aware of this and will not present
1897 * them to their callers.
1899 log
.refname
= xstrdup(arg
->refname
);
1900 log
.update_index
= ts
;
1901 log
.value_type
= REFTABLE_LOG_UPDATE
;
1902 ret
= reftable_writer_add_log(writer
, &log
);
1905 assert(ret
!= REFTABLE_API_ERROR
);
1906 reftable_log_record_release(&log
);
1910 static int reftable_be_create_reflog(struct ref_store
*ref_store
,
1911 const char *refname
,
1912 struct strbuf
*errmsg
)
1914 struct reftable_ref_store
*refs
=
1915 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "create_reflog");
1916 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1917 struct write_reflog_existence_arg arg
= {
1928 ret
= reftable_stack_reload(stack
);
1932 ret
= reftable_stack_add(stack
, &write_reflog_existence_table
, &arg
);
/* Callback payload for deleting a ref's entire reflog. */
struct write_reflog_delete_arg {
	struct reftable_stack *stack;
	const char *refname;
};
1943 static int write_reflog_delete_table(struct reftable_writer
*writer
, void *cb_data
)
1945 struct write_reflog_delete_arg
*arg
= cb_data
;
1946 struct reftable_log_record log
= {0}, tombstone
= {0};
1947 struct reftable_iterator it
= {0};
1948 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
1951 reftable_writer_set_limits(writer
, ts
, ts
);
1953 reftable_stack_init_log_iterator(arg
->stack
, &it
);
1956 * In order to delete a table we need to delete all reflog entries one
1957 * by one. This is inefficient, but the reftable format does not have a
1958 * better marker right now.
1960 ret
= reftable_iterator_seek_log(&it
, arg
->refname
);
1962 ret
= reftable_iterator_next_log(&it
, &log
);
1965 if (ret
> 0 || strcmp(log
.refname
, arg
->refname
)) {
1970 tombstone
.refname
= (char *)arg
->refname
;
1971 tombstone
.value_type
= REFTABLE_LOG_DELETION
;
1972 tombstone
.update_index
= log
.update_index
;
1974 ret
= reftable_writer_add_log(writer
, &tombstone
);
1977 reftable_log_record_release(&log
);
1978 reftable_iterator_destroy(&it
);
1982 static int reftable_be_delete_reflog(struct ref_store
*ref_store
,
1983 const char *refname
)
1985 struct reftable_ref_store
*refs
=
1986 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "delete_reflog");
1987 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
1988 struct write_reflog_delete_arg arg
= {
1994 ret
= reftable_stack_reload(stack
);
1997 ret
= reftable_stack_add(stack
, &write_reflog_delete_table
, &arg
);
1999 assert(ret
!= REFTABLE_API_ERROR
);
2003 struct reflog_expiry_arg
{
2004 struct reftable_ref_store
*refs
;
2005 struct reftable_stack
*stack
;
2006 struct reftable_log_record
*records
;
2007 struct object_id update_oid
;
2008 const char *refname
;
2012 static int write_reflog_expiry_table(struct reftable_writer
*writer
, void *cb_data
)
2014 struct reflog_expiry_arg
*arg
= cb_data
;
2015 uint64_t ts
= reftable_stack_next_update_index(arg
->stack
);
2016 uint64_t live_records
= 0;
2020 for (i
= 0; i
< arg
->len
; i
++)
2021 if (arg
->records
[i
].value_type
== REFTABLE_LOG_UPDATE
)
2024 reftable_writer_set_limits(writer
, ts
, ts
);
2026 if (!is_null_oid(&arg
->update_oid
)) {
2027 struct reftable_ref_record ref
= {0};
2028 struct object_id peeled
;
2030 ref
.refname
= (char *)arg
->refname
;
2031 ref
.update_index
= ts
;
2033 if (!peel_object(arg
->refs
->base
.repo
, &arg
->update_oid
, &peeled
)) {
2034 ref
.value_type
= REFTABLE_REF_VAL2
;
2035 memcpy(ref
.value
.val2
.target_value
, peeled
.hash
, GIT_MAX_RAWSZ
);
2036 memcpy(ref
.value
.val2
.value
, arg
->update_oid
.hash
, GIT_MAX_RAWSZ
);
2038 ref
.value_type
= REFTABLE_REF_VAL1
;
2039 memcpy(ref
.value
.val1
, arg
->update_oid
.hash
, GIT_MAX_RAWSZ
);
2042 ret
= reftable_writer_add_ref(writer
, &ref
);
2048 * When there are no more entries left in the reflog we empty it
2049 * completely, but write a placeholder reflog entry that indicates that
2050 * the reflog still exists.
2052 if (!live_records
) {
2053 struct reftable_log_record log
= {
2054 .refname
= (char *)arg
->refname
,
2055 .value_type
= REFTABLE_LOG_UPDATE
,
2059 ret
= reftable_writer_add_log(writer
, &log
);
2064 for (i
= 0; i
< arg
->len
; i
++) {
2065 ret
= reftable_writer_add_log(writer
, &arg
->records
[i
]);
2073 static int reftable_be_reflog_expire(struct ref_store
*ref_store
,
2074 const char *refname
,
2076 reflog_expiry_prepare_fn prepare_fn
,
2077 reflog_expiry_should_prune_fn should_prune_fn
,
2078 reflog_expiry_cleanup_fn cleanup_fn
,
2079 void *policy_cb_data
)
2082 * For log expiry, we write tombstones for every single reflog entry
2083 * that is to be expired. This means that the entries are still
2084 * retrievable by delving into the stack, and expiring entries
2085 * paradoxically takes extra memory. This memory is only reclaimed when
2086 * compacting the reftable stack.
2088 * It would be better if the refs backend supported an API that sets a
2089 * criterion for all refs, passing the criterion to pack_refs().
2091 * On the plus side, because we do the expiration per ref, we can easily
2092 * insert the reflog existence dummies.
2094 struct reftable_ref_store
*refs
=
2095 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "reflog_expire");
2096 struct reftable_stack
*stack
= stack_for(refs
, refname
, &refname
);
2097 struct reftable_log_record
*logs
= NULL
;
2098 struct reftable_log_record
*rewritten
= NULL
;
2099 struct reftable_ref_record ref_record
= {0};
2100 struct reftable_iterator it
= {0};
2101 struct reftable_addition
*add
= NULL
;
2102 struct reflog_expiry_arg arg
= {0};
2103 struct object_id oid
= {0};
2104 uint8_t *last_hash
= NULL
;
2105 size_t logs_nr
= 0, logs_alloc
= 0, i
;
2111 ret
= reftable_stack_reload(stack
);
2115 reftable_stack_init_log_iterator(stack
, &it
);
2117 ret
= reftable_iterator_seek_log(&it
, refname
);
2121 ret
= reftable_stack_new_addition(&add
, stack
);
2125 ret
= reftable_stack_read_ref(stack
, refname
, &ref_record
);
2128 if (reftable_ref_record_val1(&ref_record
))
2129 oidread(&oid
, reftable_ref_record_val1(&ref_record
));
2130 prepare_fn(refname
, &oid
, policy_cb_data
);
2133 struct reftable_log_record log
= {0};
2134 struct object_id old_oid
, new_oid
;
2136 ret
= reftable_iterator_next_log(&it
, &log
);
2139 if (ret
> 0 || strcmp(log
.refname
, refname
)) {
2140 reftable_log_record_release(&log
);
2144 oidread(&old_oid
, log
.value
.update
.old_hash
);
2145 oidread(&new_oid
, log
.value
.update
.new_hash
);
2148 * Skip over the reflog existence marker. We will add it back
2149 * in when there are no live reflog records.
2151 if (is_null_oid(&old_oid
) && is_null_oid(&new_oid
)) {
2152 reftable_log_record_release(&log
);
2156 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
2157 logs
[logs_nr
++] = log
;
2161 * We need to rewrite all reflog entries according to the pruning
2162 * callback function:
2164 * - If a reflog entry shall be pruned we mark the record for
2167 * - Otherwise we may have to rewrite the chain of reflog entries so
2168 * that gaps created by just-deleted records get backfilled.
2170 CALLOC_ARRAY(rewritten
, logs_nr
);
2171 for (i
= logs_nr
; i
--;) {
2172 struct reftable_log_record
*dest
= &rewritten
[i
];
2173 struct object_id old_oid
, new_oid
;
2176 oidread(&old_oid
, logs
[i
].value
.update
.old_hash
);
2177 oidread(&new_oid
, logs
[i
].value
.update
.new_hash
);
2179 if (should_prune_fn(&old_oid
, &new_oid
, logs
[i
].value
.update
.email
,
2180 (timestamp_t
)logs
[i
].value
.update
.time
,
2181 logs
[i
].value
.update
.tz_offset
,
2182 logs
[i
].value
.update
.message
,
2184 dest
->value_type
= REFTABLE_LOG_DELETION
;
2186 if ((flags
& EXPIRE_REFLOGS_REWRITE
) && last_hash
)
2187 memcpy(dest
->value
.update
.old_hash
, last_hash
, GIT_MAX_RAWSZ
);
2188 last_hash
= logs
[i
].value
.update
.new_hash
;
2192 if (flags
& EXPIRE_REFLOGS_UPDATE_REF
&& last_hash
&&
2193 reftable_ref_record_val1(&ref_record
))
2194 oidread(&arg
.update_oid
, last_hash
);
2197 arg
.records
= rewritten
;
2200 arg
.refname
= refname
,
2202 ret
= reftable_addition_add(add
, &write_reflog_expiry_table
, &arg
);
2207 * Future improvement: we could skip writing records that were
2210 if (!(flags
& EXPIRE_REFLOGS_DRY_RUN
))
2211 ret
= reftable_addition_commit(add
);
2215 cleanup_fn(policy_cb_data
);
2216 assert(ret
!= REFTABLE_API_ERROR
);
2218 reftable_ref_record_release(&ref_record
);
2219 reftable_iterator_destroy(&it
);
2220 reftable_addition_destroy(add
);
2221 for (i
= 0; i
< logs_nr
; i
++)
2222 reftable_log_record_release(&logs
[i
]);
2228 struct ref_storage_be refs_be_reftable
= {
2230 .init
= reftable_be_init
,
2231 .release
= reftable_be_release
,
2232 .create_on_disk
= reftable_be_create_on_disk
,
2234 .transaction_prepare
= reftable_be_transaction_prepare
,
2235 .transaction_finish
= reftable_be_transaction_finish
,
2236 .transaction_abort
= reftable_be_transaction_abort
,
2237 .initial_transaction_commit
= reftable_be_initial_transaction_commit
,
2239 .pack_refs
= reftable_be_pack_refs
,
2240 .rename_ref
= reftable_be_rename_ref
,
2241 .copy_ref
= reftable_be_copy_ref
,
2243 .iterator_begin
= reftable_be_iterator_begin
,
2244 .read_raw_ref
= reftable_be_read_raw_ref
,
2245 .read_symbolic_ref
= reftable_be_read_symbolic_ref
,
2247 .reflog_iterator_begin
= reftable_be_reflog_iterator_begin
,
2248 .for_each_reflog_ent
= reftable_be_for_each_reflog_ent
,
2249 .for_each_reflog_ent_reverse
= reftable_be_for_each_reflog_ent_reverse
,
2250 .reflog_exists
= reftable_be_reflog_exists
,
2251 .create_reflog
= reftable_be_create_reflog
,
2252 .delete_reflog
= reftable_be_delete_reflog
,
2253 .reflog_expire
= reftable_be_reflog_expire
,