1 #define USE_THE_REPOSITORY_VARIABLE
3 #include "../git-compat-util.h"
4 #include "../abspath.h"
5 #include "../chdir-notify.h"
8 #include "../environment.h"
9 #include "../gettext.h"
12 #include "../iterator.h"
14 #include "../lockfile.h"
15 #include "../object.h"
18 #include "../reftable/reftable-basics.h"
19 #include "../reftable/reftable-stack.h"
20 #include "../reftable/reftable-record.h"
21 #include "../reftable/reftable-error.h"
22 #include "../reftable/reftable-iterator.h"
23 #include "../repo-settings.h"
25 #include "../strmap.h"
26 #include "../trace2.h"
27 #include "../write-or-die.h"
29 #include "refs-internal.h"
32 * Used as a flag in ref_update::flags when the ref_update was via an
35 #define REF_UPDATE_VIA_HEAD (1 << 8)
37 struct reftable_backend
{
38 struct reftable_stack
*stack
;
39 struct reftable_iterator it
;
42 static void reftable_backend_on_reload(void *payload
)
44 struct reftable_backend
*be
= payload
;
45 reftable_iterator_destroy(&be
->it
);
48 static int reftable_backend_init(struct reftable_backend
*be
,
50 const struct reftable_write_options
*_opts
)
52 struct reftable_write_options opts
= *_opts
;
53 opts
.on_reload
= reftable_backend_on_reload
;
54 opts
.on_reload_payload
= be
;
55 return reftable_new_stack(&be
->stack
, path
, &opts
);
58 static void reftable_backend_release(struct reftable_backend
*be
)
60 reftable_stack_destroy(be
->stack
);
62 reftable_iterator_destroy(&be
->it
);
65 static int reftable_backend_read_ref(struct reftable_backend
*be
,
67 struct object_id
*oid
,
68 struct strbuf
*referent
,
71 struct reftable_ref_record ref
= {0};
75 ret
= reftable_stack_init_ref_iterator(be
->stack
, &be
->it
);
80 ret
= reftable_iterator_seek_ref(&be
->it
, refname
);
84 ret
= reftable_iterator_next_ref(&be
->it
, &ref
);
88 if (strcmp(ref
.refname
, refname
)) {
93 if (ref
.value_type
== REFTABLE_REF_SYMREF
) {
94 strbuf_reset(referent
);
95 strbuf_addstr(referent
, ref
.value
.symref
);
96 *type
|= REF_ISSYMREF
;
97 } else if (reftable_ref_record_val1(&ref
)) {
100 switch (reftable_stack_hash_id(be
->stack
)) {
101 case REFTABLE_HASH_SHA1
:
102 hash_id
= GIT_HASH_SHA1
;
104 case REFTABLE_HASH_SHA256
:
105 hash_id
= GIT_HASH_SHA256
;
108 BUG("unhandled hash ID %d", reftable_stack_hash_id(be
->stack
));
111 oidread(oid
, reftable_ref_record_val1(&ref
),
112 &hash_algos
[hash_id
]);
114 /* We got a tombstone, which should not happen. */
115 BUG("unhandled reference value type %d", ref
.value_type
);
119 assert(ret
!= REFTABLE_API_ERROR
);
120 reftable_ref_record_release(&ref
);
124 struct reftable_ref_store
{
125 struct ref_store base
;
128 * The main backend refers to the common dir and thus contains common
129 * refs as well as refs of the main repository.
131 struct reftable_backend main_backend
;
133 * The worktree backend refers to the gitdir in case the refdb is opened
134 * via a worktree. It thus contains the per-worktree refs.
136 struct reftable_backend worktree_backend
;
138 * Map of worktree backends by their respective worktree names. The map
139 * is populated lazily when we try to resolve `worktrees/$worktree` refs.
141 struct strmap worktree_backends
;
142 struct reftable_write_options write_options
;
144 unsigned int store_flags
;
145 enum log_refs_config log_all_ref_updates
;
150 * Downcast ref_store to reftable_ref_store. Die if ref_store is not a
151 * reftable_ref_store. required_flags is compared with ref_store's store_flags
152 * to ensure the ref_store has all required capabilities. "caller" is used in
153 * any necessary error messages.
155 static struct reftable_ref_store
*reftable_be_downcast(struct ref_store
*ref_store
,
156 unsigned int required_flags
,
159 struct reftable_ref_store
*refs
;
161 if (ref_store
->be
!= &refs_be_reftable
)
162 BUG("ref_store is type \"%s\" not \"reftables\" in %s",
163 ref_store
->be
->name
, caller
);
165 refs
= (struct reftable_ref_store
*)ref_store
;
167 if ((refs
->store_flags
& required_flags
) != required_flags
)
168 BUG("operation %s requires abilities 0x%x, but only have 0x%x",
169 caller
, required_flags
, refs
->store_flags
);
175 * Some refs are global to the repository (refs/heads/{*}), while others are
176 * local to the worktree (eg. HEAD, refs/bisect/{*}). We solve this by having
177 * multiple separate databases (ie. multiple reftable/ directories), one for
178 * the shared refs, one for the current worktree refs, and one for each
179 * additional worktree. For reading, we merge the view of both the shared and
180 * the current worktree's refs, when necessary.
182 * This function also optionally assigns the rewritten reference name that is
183 * local to the stack. This translation is required when using worktree refs
184 * like `worktrees/$worktree/refs/heads/foo` as worktree stacks will store
185 * those references in their normalized form.
187 static int backend_for(struct reftable_backend
**out
,
188 struct reftable_ref_store
*store
,
190 const char **rewritten_ref
,
193 struct reftable_backend
*be
;
198 be
= &store
->main_backend
;
202 switch (parse_worktree_ref(refname
, &wtname
, &wtname_len
, rewritten_ref
)) {
203 case REF_WORKTREE_OTHER
: {
204 static struct strbuf wtname_buf
= STRBUF_INIT
;
205 struct strbuf wt_dir
= STRBUF_INIT
;
208 * We're using a static buffer here so that we don't need to
209 * allocate the worktree name whenever we look up a reference.
210 * This could be avoided if the strmap interface knew how to
211 * handle keys with a length.
213 strbuf_reset(&wtname_buf
);
214 strbuf_add(&wtname_buf
, wtname
, wtname_len
);
217 * There is an edge case here: when the worktree references the
218 * current worktree, then we set up the stack once via
219 * `worktree_backends` and once via `worktree_backend`. This is
220 * wasteful, but in the reading case it shouldn't matter. And
221 * in the writing case we would notice that the stack is locked
222 * already and error out when trying to write a reference via
225 be
= strmap_get(&store
->worktree_backends
, wtname_buf
.buf
);
227 strbuf_addf(&wt_dir
, "%s/worktrees/%s/reftable",
228 store
->base
.repo
->commondir
, wtname_buf
.buf
);
231 store
->err
= reftable_backend_init(be
, wt_dir
.buf
,
232 &store
->write_options
);
233 assert(store
->err
!= REFTABLE_API_ERROR
);
235 strmap_put(&store
->worktree_backends
, wtname_buf
.buf
, be
);
238 strbuf_release(&wt_dir
);
241 case REF_WORKTREE_CURRENT
:
243 * If there is no worktree stack then we're currently in the
244 * main worktree. We thus return the main stack in that case.
246 if (!store
->worktree_backend
.stack
)
247 be
= &store
->main_backend
;
249 be
= &store
->worktree_backend
;
251 case REF_WORKTREE_MAIN
:
252 case REF_WORKTREE_SHARED
:
253 be
= &store
->main_backend
;
256 BUG("unhandled worktree reference type");
261 int ret
= reftable_stack_reload(be
->stack
);
270 static int should_write_log(struct reftable_ref_store
*refs
, const char *refname
)
272 enum log_refs_config log_refs_cfg
= refs
->log_all_ref_updates
;
273 if (log_refs_cfg
== LOG_REFS_UNSET
)
274 log_refs_cfg
= is_bare_repository() ? LOG_REFS_NONE
: LOG_REFS_NORMAL
;
276 switch (log_refs_cfg
) {
278 return refs_reflog_exists(&refs
->base
, refname
);
279 case LOG_REFS_ALWAYS
:
281 case LOG_REFS_NORMAL
:
282 if (should_autocreate_reflog(log_refs_cfg
, refname
))
284 return refs_reflog_exists(&refs
->base
, refname
);
286 BUG("unhandled core.logAllRefUpdates value %d", log_refs_cfg
);
290 static void fill_reftable_log_record(struct reftable_log_record
*log
, const struct ident_split
*split
)
292 const char *tz_begin
;
295 reftable_log_record_release(log
);
296 log
->value_type
= REFTABLE_LOG_UPDATE
;
297 log
->value
.update
.name
=
298 xstrndup(split
->name_begin
, split
->name_end
- split
->name_begin
);
299 log
->value
.update
.email
=
300 xstrndup(split
->mail_begin
, split
->mail_end
- split
->mail_begin
);
301 log
->value
.update
.time
= atol(split
->date_begin
);
303 tz_begin
= split
->tz_begin
;
304 if (*tz_begin
== '-') {
308 if (*tz_begin
== '+') {
313 log
->value
.update
.tz_offset
= sign
* atoi(tz_begin
);
316 static int reftable_be_config(const char *var
, const char *value
,
317 const struct config_context
*ctx
,
320 struct reftable_write_options
*opts
= _opts
;
322 if (!strcmp(var
, "reftable.blocksize")) {
323 unsigned long block_size
= git_config_ulong(var
, value
, ctx
->kvi
);
324 if (block_size
> 16777215)
325 die("reftable block size cannot exceed 16MB");
326 opts
->block_size
= block_size
;
327 } else if (!strcmp(var
, "reftable.restartinterval")) {
328 unsigned long restart_interval
= git_config_ulong(var
, value
, ctx
->kvi
);
329 if (restart_interval
> UINT16_MAX
)
330 die("reftable block size cannot exceed %u", (unsigned)UINT16_MAX
);
331 opts
->restart_interval
= restart_interval
;
332 } else if (!strcmp(var
, "reftable.indexobjects")) {
333 opts
->skip_index_objects
= !git_config_bool(var
, value
);
334 } else if (!strcmp(var
, "reftable.geometricfactor")) {
335 unsigned long factor
= git_config_ulong(var
, value
, ctx
->kvi
);
336 if (factor
> UINT8_MAX
)
337 die("reftable geometric factor cannot exceed %u", (unsigned)UINT8_MAX
);
338 opts
->auto_compaction_factor
= factor
;
339 } else if (!strcmp(var
, "reftable.locktimeout")) {
340 int64_t lock_timeout
= git_config_int64(var
, value
, ctx
->kvi
);
341 if (lock_timeout
> LONG_MAX
)
342 die("reftable lock timeout cannot exceed %"PRIdMAX
, (intmax_t)LONG_MAX
);
343 if (lock_timeout
< 0 && lock_timeout
!= -1)
344 die("reftable lock timeout does not support negative values other than -1");
345 opts
->lock_timeout_ms
= lock_timeout
;
351 static int reftable_be_fsync(int fd
)
353 return fsync_component(FSYNC_COMPONENT_REFERENCE
, fd
);
356 static struct ref_store
*reftable_be_init(struct repository
*repo
,
358 unsigned int store_flags
)
360 struct reftable_ref_store
*refs
= xcalloc(1, sizeof(*refs
));
361 struct strbuf path
= STRBUF_INIT
;
368 base_ref_store_init(&refs
->base
, repo
, gitdir
, &refs_be_reftable
);
369 strmap_init(&refs
->worktree_backends
);
370 refs
->store_flags
= store_flags
;
371 refs
->log_all_ref_updates
= repo_settings_get_log_all_ref_updates(repo
);
373 switch (repo
->hash_algo
->format_id
) {
374 case GIT_SHA1_FORMAT_ID
:
375 refs
->write_options
.hash_id
= REFTABLE_HASH_SHA1
;
377 case GIT_SHA256_FORMAT_ID
:
378 refs
->write_options
.hash_id
= REFTABLE_HASH_SHA256
;
381 BUG("unknown hash algorithm %d", repo
->hash_algo
->format_id
);
383 refs
->write_options
.default_permissions
= calc_shared_perm(the_repository
, 0666 & ~mask
);
384 refs
->write_options
.disable_auto_compact
=
385 !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
386 refs
->write_options
.lock_timeout_ms
= 100;
387 refs
->write_options
.fsync
= reftable_be_fsync
;
389 git_config(reftable_be_config
, &refs
->write_options
);
392 * It is somewhat unfortunate that we have to mirror the default block
393 * size of the reftable library here. But given that the write options
394 * wouldn't be updated by the library here, and given that we require
395 * the proper block size to trim reflog message so that they fit, we
396 * must set up a proper value here.
398 if (!refs
->write_options
.block_size
)
399 refs
->write_options
.block_size
= 4096;
402 * Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
403 * This stack contains both the shared and the main worktree refs.
405 * Note that we don't try to resolve the path in case we have a
406 * worktree because `get_common_dir_noenv()` already does it for us.
408 is_worktree
= get_common_dir_noenv(&path
, gitdir
);
411 strbuf_realpath(&path
, gitdir
, 0);
413 strbuf_addstr(&path
, "/reftable");
414 refs
->err
= reftable_backend_init(&refs
->main_backend
, path
.buf
,
415 &refs
->write_options
);
420 * If we're in a worktree we also need to set up the worktree reftable
421 * stack that is contained in the per-worktree GIT_DIR.
423 * Ideally, we would also add the stack to our worktree stack map. But
424 * we have no way to figure out the worktree name here and thus can't
429 strbuf_addf(&path
, "%s/reftable", gitdir
);
431 refs
->err
= reftable_backend_init(&refs
->worktree_backend
, path
.buf
,
432 &refs
->write_options
);
437 chdir_notify_reparent("reftables-backend $GIT_DIR", &refs
->base
.gitdir
);
440 assert(refs
->err
!= REFTABLE_API_ERROR
);
441 strbuf_release(&path
);
445 static void reftable_be_release(struct ref_store
*ref_store
)
447 struct reftable_ref_store
*refs
= reftable_be_downcast(ref_store
, 0, "release");
448 struct strmap_entry
*entry
;
449 struct hashmap_iter iter
;
451 if (refs
->main_backend
.stack
)
452 reftable_backend_release(&refs
->main_backend
);
453 if (refs
->worktree_backend
.stack
)
454 reftable_backend_release(&refs
->worktree_backend
);
456 strmap_for_each_entry(&refs
->worktree_backends
, &iter
, entry
) {
457 struct reftable_backend
*be
= entry
->value
;
458 reftable_backend_release(be
);
461 strmap_clear(&refs
->worktree_backends
, 0);
464 static int reftable_be_create_on_disk(struct ref_store
*ref_store
,
466 struct strbuf
*err UNUSED
)
468 struct reftable_ref_store
*refs
=
469 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "create");
470 struct strbuf sb
= STRBUF_INIT
;
472 strbuf_addf(&sb
, "%s/reftable", refs
->base
.gitdir
);
473 safe_create_dir(the_repository
, sb
.buf
, 1);
476 strbuf_addf(&sb
, "%s/HEAD", refs
->base
.gitdir
);
477 write_file(sb
.buf
, "ref: refs/heads/.invalid");
478 adjust_shared_perm(the_repository
, sb
.buf
);
481 strbuf_addf(&sb
, "%s/refs", refs
->base
.gitdir
);
482 safe_create_dir(the_repository
, sb
.buf
, 1);
485 strbuf_addf(&sb
, "%s/refs/heads", refs
->base
.gitdir
);
486 write_file(sb
.buf
, "this repository uses the reftable format");
487 adjust_shared_perm(the_repository
, sb
.buf
);
493 static int reftable_be_remove_on_disk(struct ref_store
*ref_store
,
496 struct reftable_ref_store
*refs
=
497 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "remove");
498 struct strbuf sb
= STRBUF_INIT
;
502 * Release the ref store such that all stacks are closed. This is
503 * required so that the "tables.list" file is not open anymore, which
504 * would otherwise make it impossible to remove the file on Windows.
506 reftable_be_release(ref_store
);
508 strbuf_addf(&sb
, "%s/reftable", refs
->base
.gitdir
);
509 if (remove_dir_recursively(&sb
, 0) < 0) {
510 strbuf_addf(err
, "could not delete reftables: %s",
516 strbuf_addf(&sb
, "%s/HEAD", refs
->base
.gitdir
);
517 if (unlink(sb
.buf
) < 0) {
518 strbuf_addf(err
, "could not delete stub HEAD: %s",
524 strbuf_addf(&sb
, "%s/refs/heads", refs
->base
.gitdir
);
525 if (unlink(sb
.buf
) < 0) {
526 strbuf_addf(err
, "could not delete stub heads: %s",
532 strbuf_addf(&sb
, "%s/refs", refs
->base
.gitdir
);
533 if (rmdir(sb
.buf
) < 0) {
534 strbuf_addf(err
, "could not delete refs directory: %s",
543 struct reftable_ref_iterator
{
544 struct ref_iterator base
;
545 struct reftable_ref_store
*refs
;
546 struct reftable_iterator iter
;
547 struct reftable_ref_record ref
;
548 struct object_id oid
;
552 char **exclude_patterns
;
553 size_t exclude_patterns_index
;
554 size_t exclude_patterns_strlen
;
560 * Handle exclude patterns. Returns either `1`, which tells the caller that the
561 * current reference shall not be shown. Or `0`, which indicates that it should
564 static int should_exclude_current_ref(struct reftable_ref_iterator
*iter
)
566 while (iter
->exclude_patterns
[iter
->exclude_patterns_index
]) {
567 const char *pattern
= iter
->exclude_patterns
[iter
->exclude_patterns_index
];
568 char *ref_after_pattern
;
572 * Lazily cache the pattern length so that we don't have to
573 * recompute it every time this function is called.
575 if (!iter
->exclude_patterns_strlen
)
576 iter
->exclude_patterns_strlen
= strlen(pattern
);
579 * When the reference name is lexicographically bigger than the
580 * current exclude pattern we know that it won't ever match any
581 * of the following references, either. We thus advance to the
582 * next pattern and re-check whether it matches.
584 * Otherwise, if it's smaller, then we do not have a match and
585 * thus want to show the current reference.
587 cmp
= strncmp(iter
->ref
.refname
, pattern
,
588 iter
->exclude_patterns_strlen
);
590 iter
->exclude_patterns_index
++;
591 iter
->exclude_patterns_strlen
= 0;
598 * The reference shares a prefix with the exclude pattern and
599 * shall thus be omitted. We skip all references that match the
600 * pattern by seeking to the first reference after the block of
603 * This is done by appending the highest possible character to
604 * the pattern. Consequently, all references that have the
605 * pattern as prefix and whose suffix starts with anything in
606 * the range [0x00, 0xfe] are skipped. And given that 0xff is a
607 * non-printable character that shouldn't ever be in a ref name,
608 * we'd not yield any such record, either.
610 * Note that the seeked-to reference may also be excluded. This
611 * is not handled here though, but the caller is expected to
612 * loop and re-verify the next reference for us.
614 ref_after_pattern
= xstrfmt("%s%c", pattern
, 0xff);
615 iter
->err
= reftable_iterator_seek_ref(&iter
->iter
, ref_after_pattern
);
616 iter
->exclude_patterns_index
++;
617 iter
->exclude_patterns_strlen
= 0;
618 trace2_counter_add(TRACE2_COUNTER_ID_REFTABLE_RESEEKS
, 1);
620 free(ref_after_pattern
);
627 static int reftable_ref_iterator_advance(struct ref_iterator
*ref_iterator
)
629 struct reftable_ref_iterator
*iter
=
630 (struct reftable_ref_iterator
*)ref_iterator
;
631 struct reftable_ref_store
*refs
= iter
->refs
;
632 const char *referent
= NULL
;
637 iter
->err
= reftable_iterator_next_ref(&iter
->iter
, &iter
->ref
);
642 * The files backend only lists references contained in "refs/" unless
643 * the root refs are to be included. We emulate the same behaviour here.
645 if (!starts_with(iter
->ref
.refname
, "refs/") &&
646 !(iter
->flags
& DO_FOR_EACH_INCLUDE_ROOT_REFS
&&
647 is_root_ref(iter
->ref
.refname
))) {
651 if (iter
->prefix_len
&&
652 strncmp(iter
->prefix
, iter
->ref
.refname
, iter
->prefix_len
)) {
657 if (iter
->exclude_patterns
&& should_exclude_current_ref(iter
))
660 if (iter
->flags
& DO_FOR_EACH_PER_WORKTREE_ONLY
&&
661 parse_worktree_ref(iter
->ref
.refname
, NULL
, NULL
, NULL
) !=
662 REF_WORKTREE_CURRENT
)
665 switch (iter
->ref
.value_type
) {
666 case REFTABLE_REF_VAL1
:
667 oidread(&iter
->oid
, iter
->ref
.value
.val1
,
668 refs
->base
.repo
->hash_algo
);
670 case REFTABLE_REF_VAL2
:
671 oidread(&iter
->oid
, iter
->ref
.value
.val2
.value
,
672 refs
->base
.repo
->hash_algo
);
674 case REFTABLE_REF_SYMREF
:
675 referent
= refs_resolve_ref_unsafe(&iter
->refs
->base
,
680 oidclr(&iter
->oid
, refs
->base
.repo
->hash_algo
);
683 BUG("unhandled reference value type %d", iter
->ref
.value_type
);
686 if (is_null_oid(&iter
->oid
))
687 flags
|= REF_ISBROKEN
;
689 if (check_refname_format(iter
->ref
.refname
, REFNAME_ALLOW_ONELEVEL
)) {
690 if (!refname_is_safe(iter
->ref
.refname
))
691 die(_("refname is dangerous: %s"), iter
->ref
.refname
);
692 oidclr(&iter
->oid
, refs
->base
.repo
->hash_algo
);
693 flags
|= REF_BAD_NAME
| REF_ISBROKEN
;
696 if (iter
->flags
& DO_FOR_EACH_OMIT_DANGLING_SYMREFS
&&
697 flags
& REF_ISSYMREF
&&
698 flags
& REF_ISBROKEN
)
701 if (!(iter
->flags
& DO_FOR_EACH_INCLUDE_BROKEN
) &&
702 !ref_resolves_to_object(iter
->ref
.refname
, refs
->base
.repo
,
706 iter
->base
.refname
= iter
->ref
.refname
;
707 iter
->base
.referent
= referent
;
708 iter
->base
.oid
= &iter
->oid
;
709 iter
->base
.flags
= flags
;
721 static int reftable_ref_iterator_seek(struct ref_iterator
*ref_iterator
,
724 struct reftable_ref_iterator
*iter
=
725 (struct reftable_ref_iterator
*)ref_iterator
;
728 iter
->prefix
= xstrdup_or_null(prefix
);
729 iter
->prefix_len
= prefix
? strlen(prefix
) : 0;
730 iter
->err
= reftable_iterator_seek_ref(&iter
->iter
, prefix
);
735 static int reftable_ref_iterator_peel(struct ref_iterator
*ref_iterator
,
736 struct object_id
*peeled
)
738 struct reftable_ref_iterator
*iter
=
739 (struct reftable_ref_iterator
*)ref_iterator
;
741 if (iter
->ref
.value_type
== REFTABLE_REF_VAL2
) {
742 oidread(peeled
, iter
->ref
.value
.val2
.target_value
,
743 iter
->refs
->base
.repo
->hash_algo
);
750 static void reftable_ref_iterator_release(struct ref_iterator
*ref_iterator
)
752 struct reftable_ref_iterator
*iter
=
753 (struct reftable_ref_iterator
*)ref_iterator
;
754 reftable_ref_record_release(&iter
->ref
);
755 reftable_iterator_destroy(&iter
->iter
);
756 if (iter
->exclude_patterns
) {
757 for (size_t i
= 0; iter
->exclude_patterns
[i
]; i
++)
758 free(iter
->exclude_patterns
[i
]);
759 free(iter
->exclude_patterns
);
764 static struct ref_iterator_vtable reftable_ref_iterator_vtable
= {
765 .advance
= reftable_ref_iterator_advance
,
766 .seek
= reftable_ref_iterator_seek
,
767 .peel
= reftable_ref_iterator_peel
,
768 .release
= reftable_ref_iterator_release
,
771 static int qsort_strcmp(const void *va
, const void *vb
)
773 const char *a
= *(const char **)va
;
774 const char *b
= *(const char **)vb
;
778 static char **filter_exclude_patterns(const char **exclude_patterns
)
780 size_t filtered_size
= 0, filtered_alloc
= 0;
781 char **filtered
= NULL
;
783 if (!exclude_patterns
)
786 for (size_t i
= 0; ; i
++) {
787 const char *exclude_pattern
= exclude_patterns
[i
];
790 if (!exclude_pattern
)
793 for (const char *p
= exclude_pattern
; *p
; p
++) {
794 has_glob
= is_glob_special(*p
);
801 ALLOC_GROW(filtered
, filtered_size
+ 1, filtered_alloc
);
802 filtered
[filtered_size
++] = xstrdup(exclude_pattern
);
806 QSORT(filtered
, filtered_size
, qsort_strcmp
);
807 ALLOC_GROW(filtered
, filtered_size
+ 1, filtered_alloc
);
808 filtered
[filtered_size
++] = NULL
;
814 static struct reftable_ref_iterator
*ref_iterator_for_stack(struct reftable_ref_store
*refs
,
815 struct reftable_stack
*stack
,
817 const char **exclude_patterns
,
820 struct reftable_ref_iterator
*iter
;
823 iter
= xcalloc(1, sizeof(*iter
));
824 base_ref_iterator_init(&iter
->base
, &reftable_ref_iterator_vtable
);
825 iter
->base
.oid
= &iter
->oid
;
828 iter
->exclude_patterns
= filter_exclude_patterns(exclude_patterns
);
834 ret
= reftable_stack_reload(stack
);
838 ret
= reftable_stack_init_ref_iterator(stack
, &iter
->iter
);
842 ret
= reftable_ref_iterator_seek(&iter
->base
, prefix
);
851 static struct ref_iterator
*reftable_be_iterator_begin(struct ref_store
*ref_store
,
853 const char **exclude_patterns
,
856 struct reftable_ref_iterator
*main_iter
, *worktree_iter
;
857 struct reftable_ref_store
*refs
;
858 unsigned int required_flags
= REF_STORE_READ
;
860 if (!(flags
& DO_FOR_EACH_INCLUDE_BROKEN
))
861 required_flags
|= REF_STORE_ODB
;
862 refs
= reftable_be_downcast(ref_store
, required_flags
, "ref_iterator_begin");
864 main_iter
= ref_iterator_for_stack(refs
, refs
->main_backend
.stack
, prefix
,
865 exclude_patterns
, flags
);
868 * The worktree stack is only set when we're in an actual worktree
869 * right now. If we aren't, then we return the common reftable
872 if (!refs
->worktree_backend
.stack
)
873 return &main_iter
->base
;
876 * Otherwise we merge both the common and the per-worktree refs into a
879 worktree_iter
= ref_iterator_for_stack(refs
, refs
->worktree_backend
.stack
, prefix
,
880 exclude_patterns
, flags
);
881 return merge_ref_iterator_begin(&worktree_iter
->base
, &main_iter
->base
,
882 ref_iterator_select
, NULL
);
885 static int reftable_be_read_raw_ref(struct ref_store
*ref_store
,
887 struct object_id
*oid
,
888 struct strbuf
*referent
,
892 struct reftable_ref_store
*refs
=
893 reftable_be_downcast(ref_store
, REF_STORE_READ
, "read_raw_ref");
894 struct reftable_backend
*be
;
900 ret
= backend_for(&be
, refs
, refname
, &refname
, 1);
904 ret
= reftable_backend_read_ref(be
, refname
, oid
, referent
, type
);
908 *failure_errno
= ENOENT
;
915 static int reftable_be_read_symbolic_ref(struct ref_store
*ref_store
,
917 struct strbuf
*referent
)
919 struct reftable_ref_store
*refs
=
920 reftable_be_downcast(ref_store
, REF_STORE_READ
, "read_symbolic_ref");
921 struct reftable_backend
*be
;
922 struct object_id oid
;
923 unsigned int type
= 0;
926 ret
= backend_for(&be
, refs
, refname
, &refname
, 1);
930 ret
= reftable_backend_read_ref(be
, refname
, &oid
, referent
, &type
);
933 else if (type
== REF_ISSYMREF
)
940 struct reftable_transaction_update
{
941 struct ref_update
*update
;
942 struct object_id current_oid
;
945 struct write_transaction_table_arg
{
946 struct reftable_ref_store
*refs
;
947 struct reftable_backend
*be
;
948 struct reftable_addition
*addition
;
949 struct reftable_transaction_update
*updates
;
951 size_t updates_alloc
;
952 size_t updates_expected
;
956 struct reftable_transaction_data
{
957 struct write_transaction_table_arg
*args
;
958 size_t args_nr
, args_alloc
;
961 static void free_transaction_data(struct reftable_transaction_data
*tx_data
)
965 for (size_t i
= 0; i
< tx_data
->args_nr
; i
++) {
966 reftable_addition_destroy(tx_data
->args
[i
].addition
);
967 free(tx_data
->args
[i
].updates
);
974 * Prepare transaction update for the given reference update. This will cause
975 * us to lock the corresponding reftable stack for concurrent modification.
977 static int prepare_transaction_update(struct write_transaction_table_arg
**out
,
978 struct reftable_ref_store
*refs
,
979 struct reftable_transaction_data
*tx_data
,
980 struct ref_update
*update
,
983 struct write_transaction_table_arg
*arg
= NULL
;
984 struct reftable_backend
*be
;
989 * This function gets called in a loop, and we don't want to repeatedly
990 * reload the stack for every single ref update. Instead, we manually
991 * reload further down in the case where we haven't yet prepared the
992 * specific `reftable_backend`.
994 ret
= backend_for(&be
, refs
, update
->refname
, NULL
, 0);
999 * Search for a preexisting stack update. If there is one then we add
1000 * the update to it, otherwise we set up a new stack update.
1002 for (i
= 0; !arg
&& i
< tx_data
->args_nr
; i
++)
1003 if (tx_data
->args
[i
].be
== be
)
1004 arg
= &tx_data
->args
[i
];
1007 struct reftable_addition
*addition
;
1009 ret
= reftable_stack_reload(be
->stack
);
1013 ret
= reftable_stack_new_addition(&addition
, be
->stack
,
1014 REFTABLE_STACK_NEW_ADDITION_RELOAD
);
1016 if (ret
== REFTABLE_LOCK_ERROR
)
1017 strbuf_addstr(err
, "cannot lock references");
1021 ALLOC_GROW(tx_data
->args
, tx_data
->args_nr
+ 1,
1022 tx_data
->args_alloc
);
1023 arg
= &tx_data
->args
[tx_data
->args_nr
++];
1026 arg
->addition
= addition
;
1027 arg
->updates
= NULL
;
1028 arg
->updates_nr
= 0;
1029 arg
->updates_alloc
= 0;
1030 arg
->updates_expected
= 0;
1034 arg
->updates_expected
++;
1043 * Queue a reference update for the correct stack. We potentially need to
1044 * handle multiple stack updates in a single transaction when it spans across
1045 * multiple worktrees.
1047 static int queue_transaction_update(struct reftable_ref_store
*refs
,
1048 struct reftable_transaction_data
*tx_data
,
1049 struct ref_update
*update
,
1050 struct object_id
*current_oid
,
1053 struct write_transaction_table_arg
*arg
= NULL
;
1056 if (update
->backend_data
)
1057 BUG("reference update queued more than once");
1059 ret
= prepare_transaction_update(&arg
, refs
, tx_data
, update
, err
);
1063 ALLOC_GROW(arg
->updates
, arg
->updates_nr
+ 1,
1064 arg
->updates_alloc
);
1065 arg
->updates
[arg
->updates_nr
].update
= update
;
1066 oidcpy(&arg
->updates
[arg
->updates_nr
].current_oid
, current_oid
);
1067 update
->backend_data
= &arg
->updates
[arg
->updates_nr
++];
1072 static enum ref_transaction_error
prepare_single_update(struct reftable_ref_store
*refs
,
1073 struct reftable_transaction_data
*tx_data
,
1074 struct ref_transaction
*transaction
,
1075 struct reftable_backend
*be
,
1076 struct ref_update
*u
,
1078 struct string_list
*refnames_to_check
,
1079 unsigned int head_type
,
1080 struct strbuf
*head_referent
,
1081 struct strbuf
*referent
,
1084 enum ref_transaction_error ret
= 0;
1085 struct object_id current_oid
= {0};
1086 const char *rewritten_ref
;
1089 * There is no need to reload the respective backends here as
1090 * we have already reloaded them when preparing the transaction
1091 * update. And given that the stacks have been locked there
1092 * shouldn't have been any concurrent modifications of the
1095 ret
= backend_for(&be
, refs
, u
->refname
, &rewritten_ref
, 0);
1097 return REF_TRANSACTION_ERROR_GENERIC
;
1099 /* Verify that the new object ID is valid. */
1100 if ((u
->flags
& REF_HAVE_NEW
) && !is_null_oid(&u
->new_oid
) &&
1101 !(u
->flags
& REF_SKIP_OID_VERIFICATION
) &&
1102 !(u
->flags
& REF_LOG_ONLY
)) {
1103 struct object
*o
= parse_object(refs
->base
.repo
, &u
->new_oid
);
1106 _("trying to write ref '%s' with nonexistent object %s"),
1107 u
->refname
, oid_to_hex(&u
->new_oid
));
1108 return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE
;
1111 if (o
->type
!= OBJ_COMMIT
&& is_branch(u
->refname
)) {
1112 strbuf_addf(err
, _("trying to write non-commit object %s to branch '%s'"),
1113 oid_to_hex(&u
->new_oid
), u
->refname
);
1114 return REF_TRANSACTION_ERROR_INVALID_NEW_VALUE
;
1119 * When we update the reference that HEAD points to we enqueue
1120 * a second log-only update for HEAD so that its reflog is
1121 * updated accordingly.
1123 if (head_type
== REF_ISSYMREF
&&
1124 !(u
->flags
& REF_LOG_ONLY
) &&
1125 !(u
->flags
& REF_UPDATE_VIA_HEAD
) &&
1126 !strcmp(rewritten_ref
, head_referent
->buf
)) {
1128 * First make sure that HEAD is not already in the
1129 * transaction. This check is O(lg N) in the transaction
1130 * size, but it happens at most once per transaction.
1132 if (string_list_has_string(&transaction
->refnames
, "HEAD")) {
1133 /* An entry already existed */
1135 _("multiple updates for 'HEAD' (including one "
1136 "via its referent '%s') are not allowed"),
1138 return REF_TRANSACTION_ERROR_NAME_CONFLICT
;
1141 ref_transaction_add_update(
1142 transaction
, "HEAD",
1143 u
->flags
| REF_LOG_ONLY
| REF_NO_DEREF
,
1144 &u
->new_oid
, &u
->old_oid
, NULL
, NULL
, NULL
,
1148 ret
= reftable_backend_read_ref(be
, rewritten_ref
,
1149 ¤t_oid
, referent
, &u
->type
);
1151 return REF_TRANSACTION_ERROR_GENERIC
;
1152 if (ret
> 0 && !ref_update_expects_existing_old_ref(u
)) {
1153 struct string_list_item
*item
;
1155 * The reference does not exist, and we either have no
1156 * old object ID or expect the reference to not exist.
1157 * We can thus skip below safety checks as well as the
1158 * symref splitting. But we do want to verify that
1159 * there is no conflicting reference here so that we
1160 * can output a proper error message instead of failing
1163 item
= string_list_append(refnames_to_check
, u
->refname
);
1164 item
->util
= xmalloc(sizeof(update_idx
));
1165 memcpy(item
->util
, &update_idx
, sizeof(update_idx
));
1168 * There is no need to write the reference deletion
1169 * when the reference in question doesn't exist.
1171 if ((u
->flags
& REF_HAVE_NEW
) && !ref_update_has_null_new_value(u
)) {
1172 ret
= queue_transaction_update(refs
, tx_data
, u
,
1175 return REF_TRANSACTION_ERROR_GENERIC
;
1181 /* The reference does not exist, but we expected it to. */
1182 strbuf_addf(err
, _("cannot lock ref '%s': "
1185 "unable to resolve reference '%s'"),
1186 ref_update_original_update_refname(u
), u
->refname
);
1187 return REF_TRANSACTION_ERROR_NONEXISTENT_REF
;
1190 if (u
->type
& REF_ISSYMREF
) {
1192 * The reftable stack is locked at this point already,
1193 * so it is safe to call `refs_resolve_ref_unsafe()`
1194 * here without causing races.
1196 const char *resolved
= refs_resolve_ref_unsafe(&refs
->base
, u
->refname
, 0,
1197 ¤t_oid
, NULL
);
1199 if (u
->flags
& REF_NO_DEREF
) {
1200 if (u
->flags
& REF_HAVE_OLD
&& !resolved
) {
1201 strbuf_addf(err
, _("cannot lock ref '%s': "
1202 "error reading reference"), u
->refname
);
1203 return REF_TRANSACTION_ERROR_GENERIC
;
1206 struct ref_update
*new_update
;
1209 new_flags
= u
->flags
;
1210 if (!strcmp(rewritten_ref
, "HEAD"))
1211 new_flags
|= REF_UPDATE_VIA_HEAD
;
1213 if (string_list_has_string(&transaction
->refnames
, referent
->buf
)) {
1215 _("multiple updates for '%s' (including one "
1216 "via symref '%s') are not allowed"),
1217 referent
->buf
, u
->refname
);
1218 return REF_TRANSACTION_ERROR_NAME_CONFLICT
;
1222 * If we are updating a symref (eg. HEAD), we should also
1223 * update the branch that the symref points to.
1225 * This is generic functionality, and would be better
1226 * done in refs.c, but the current implementation is
1227 * intertwined with the locking in files-backend.c.
1229 new_update
= ref_transaction_add_update(
1230 transaction
, referent
->buf
, new_flags
,
1231 u
->new_target
? NULL
: &u
->new_oid
,
1232 u
->old_target
? NULL
: &u
->old_oid
,
1233 u
->new_target
, u
->old_target
,
1234 u
->committer_info
, u
->msg
);
1236 new_update
->parent_update
= u
;
1239 * Change the symbolic ref update to log only. Also, it
1240 * doesn't need to check its old OID value, as that will be
1241 * done when new_update is processed.
1243 u
->flags
|= REF_LOG_ONLY
| REF_NO_DEREF
;
1244 u
->flags
&= ~REF_HAVE_OLD
;
1249 * Verify that the old object matches our expectations. Note
1250 * that the error messages here do not make a lot of sense in
1251 * the context of the reftable backend as we never lock
1252 * individual refs. But the error messages match what the files
1253 * backend returns, which keeps our tests happy.
1255 if (u
->old_target
) {
1256 if (!(u
->type
& REF_ISSYMREF
)) {
1257 strbuf_addf(err
, _("cannot lock ref '%s': "
1258 "expected symref with target '%s': "
1259 "but is a regular ref"),
1260 ref_update_original_update_refname(u
),
1262 return REF_TRANSACTION_ERROR_EXPECTED_SYMREF
;
1265 ret
= ref_update_check_old_target(referent
->buf
, u
, err
);
1268 } else if ((u
->flags
& REF_HAVE_OLD
) && !oideq(¤t_oid
, &u
->old_oid
)) {
1269 if (is_null_oid(&u
->old_oid
)) {
1270 strbuf_addf(err
, _("cannot lock ref '%s': "
1271 "reference already exists"),
1272 ref_update_original_update_refname(u
));
1273 return REF_TRANSACTION_ERROR_CREATE_EXISTS
;
1274 } else if (is_null_oid(¤t_oid
)) {
1275 strbuf_addf(err
, _("cannot lock ref '%s': "
1276 "reference is missing but expected %s"),
1277 ref_update_original_update_refname(u
),
1278 oid_to_hex(&u
->old_oid
));
1279 return REF_TRANSACTION_ERROR_NONEXISTENT_REF
;
1281 strbuf_addf(err
, _("cannot lock ref '%s': "
1282 "is at %s but expected %s"),
1283 ref_update_original_update_refname(u
),
1284 oid_to_hex(¤t_oid
),
1285 oid_to_hex(&u
->old_oid
));
1286 return REF_TRANSACTION_ERROR_INCORRECT_OLD_VALUE
;
1291 * If all of the following conditions are true:
1293 * - We're not about to write a symref.
1294 * - We're not about to write a log-only entry.
1295 * - Old and new object ID are different.
1297 * Then we're essentially doing a no-op update that can be
1298 * skipped. This is not only for the sake of efficiency, but
1299 * also skips writing unneeded reflog entries.
1301 if ((u
->type
& REF_ISSYMREF
) ||
1302 (u
->flags
& REF_LOG_ONLY
) ||
1303 (u
->flags
& REF_HAVE_NEW
&& !oideq(¤t_oid
, &u
->new_oid
)))
1304 if (queue_transaction_update(refs
, tx_data
, u
, ¤t_oid
, err
))
1305 return REF_TRANSACTION_ERROR_GENERIC
;
1310 static int reftable_be_transaction_prepare(struct ref_store
*ref_store
,
1311 struct ref_transaction
*transaction
,
1314 struct reftable_ref_store
*refs
=
1315 reftable_be_downcast(ref_store
, REF_STORE_WRITE
|REF_STORE_MAIN
, "ref_transaction_prepare");
1316 struct strbuf referent
= STRBUF_INIT
, head_referent
= STRBUF_INIT
;
1317 struct string_list refnames_to_check
= STRING_LIST_INIT_NODUP
;
1318 struct reftable_transaction_data
*tx_data
= NULL
;
1319 struct reftable_backend
*be
;
1320 struct object_id head_oid
;
1321 unsigned int head_type
= 0;
1329 tx_data
= xcalloc(1, sizeof(*tx_data
));
1332 * Preprocess all updates. For one we check that there are no duplicate
1333 * reference updates in this transaction. Second, we lock all stacks
1334 * that will be modified during the transaction.
1336 for (i
= 0; i
< transaction
->nr
; i
++) {
1337 ret
= prepare_transaction_update(NULL
, refs
, tx_data
,
1338 transaction
->updates
[i
], err
);
1344 * Now that we have counted updates per stack we can preallocate their
1345 * arrays. This avoids having to reallocate many times.
1347 for (i
= 0; i
< tx_data
->args_nr
; i
++) {
1348 CALLOC_ARRAY(tx_data
->args
[i
].updates
, tx_data
->args
[i
].updates_expected
);
1349 tx_data
->args
[i
].updates_alloc
= tx_data
->args
[i
].updates_expected
;
1353 * TODO: it's dubious whether we should reload the stack that "HEAD"
1354 * belongs to or not. In theory, it may happen that we only modify
1355 * stacks which are _not_ part of the "HEAD" stack. In that case we
1356 * wouldn't have prepared any transaction for its stack and would not
1357 * have reloaded it, which may mean that it is stale.
1359 * On the other hand, reloading that stack without locking it feels
1360 * wrong, too, as the value of "HEAD" could be modified concurrently at
1361 * any point in time.
1363 ret
= backend_for(&be
, refs
, "HEAD", NULL
, 0);
1367 ret
= reftable_backend_read_ref(be
, "HEAD", &head_oid
,
1368 &head_referent
, &head_type
);
1373 for (i
= 0; i
< transaction
->nr
; i
++) {
1374 ret
= prepare_single_update(refs
, tx_data
, transaction
, be
,
1375 transaction
->updates
[i
], i
,
1376 &refnames_to_check
, head_type
,
1377 &head_referent
, &referent
, err
);
1379 if (ref_transaction_maybe_set_rejected(transaction
, i
, ret
)) {
1389 ret
= refs_verify_refnames_available(ref_store
, &refnames_to_check
,
1390 &transaction
->refnames
, NULL
,
1392 transaction
->flags
& REF_TRANSACTION_FLAG_INITIAL
,
1397 transaction
->backend_data
= tx_data
;
1398 transaction
->state
= REF_TRANSACTION_PREPARED
;
1402 free_transaction_data(tx_data
);
1403 transaction
->state
= REF_TRANSACTION_CLOSED
;
1405 strbuf_addf(err
, _("reftable: transaction prepare: %s"),
1406 reftable_error_str(ret
));
1408 strbuf_release(&referent
);
1409 strbuf_release(&head_referent
);
1410 string_list_clear(&refnames_to_check
, 1);
1415 static int reftable_be_transaction_abort(struct ref_store
*ref_store UNUSED
,
1416 struct ref_transaction
*transaction
,
1417 struct strbuf
*err UNUSED
)
1419 struct reftable_transaction_data
*tx_data
= transaction
->backend_data
;
1420 free_transaction_data(tx_data
);
1421 transaction
->state
= REF_TRANSACTION_CLOSED
;
1425 static int transaction_update_cmp(const void *a
, const void *b
)
1427 struct reftable_transaction_update
*update_a
= (struct reftable_transaction_update
*)a
;
1428 struct reftable_transaction_update
*update_b
= (struct reftable_transaction_update
*)b
;
1431 * If there is an index set, it should take preference (default is 0).
1432 * This ensures that updates with indexes are sorted amongst themselves.
1434 if (update_a
->update
->index
|| update_b
->update
->index
)
1435 return update_a
->update
->index
- update_b
->update
->index
;
1437 return strcmp(update_a
->update
->refname
, update_b
->update
->refname
);
1440 static int write_transaction_table(struct reftable_writer
*writer
, void *cb_data
)
1442 struct write_transaction_table_arg
*arg
= cb_data
;
1443 uint64_t ts
= reftable_stack_next_update_index(arg
->be
->stack
);
1444 struct reftable_log_record
*logs
= NULL
;
1445 struct ident_split committer_ident
= {0};
1446 size_t logs_nr
= 0, logs_alloc
= 0, i
;
1447 const char *committer_info
;
1450 committer_info
= git_committer_info(0);
1451 if (split_ident_line(&committer_ident
, committer_info
, strlen(committer_info
)))
1452 BUG("failed splitting committer info");
1454 QSORT(arg
->updates
, arg
->updates_nr
, transaction_update_cmp
);
1457 * During reflog migration, we add indexes for a single reflog with
1458 * multiple entries. Each entry will contain a different update_index,
1459 * so set the limits accordingly.
1461 ret
= reftable_writer_set_limits(writer
, ts
, ts
+ arg
->max_index
);
1465 for (i
= 0; i
< arg
->updates_nr
; i
++) {
1466 struct reftable_transaction_update
*tx_update
= &arg
->updates
[i
];
1467 struct ref_update
*u
= tx_update
->update
;
1469 if (u
->rejection_err
)
1473 * Write a reflog entry when updating a ref to point to
1474 * something new in either of the following cases:
1476 * - The reference is about to be deleted. We always want to
1477 * delete the reflog in that case.
1478 * - REF_FORCE_CREATE_REFLOG is set, asking us to always create
1480 * - `core.logAllRefUpdates` tells us to create the reflog for
1483 if ((u
->flags
& REF_HAVE_NEW
) &&
1484 !(u
->type
& REF_ISSYMREF
) &&
1485 ref_update_has_null_new_value(u
)) {
1486 struct reftable_log_record log
= {0};
1487 struct reftable_iterator it
= {0};
1489 ret
= reftable_stack_init_log_iterator(arg
->be
->stack
, &it
);
1494 * When deleting refs we also delete all reflog entries
1495 * with them. While it is not strictly required to
1496 * delete reflogs together with their refs, this
1497 * matches the behaviour of the files backend.
1499 * Unfortunately, we have no better way than to delete
1500 * all reflog entries one by one.
1502 ret
= reftable_iterator_seek_log(&it
, u
->refname
);
1504 struct reftable_log_record
*tombstone
;
1506 ret
= reftable_iterator_next_log(&it
, &log
);
1509 if (ret
> 0 || strcmp(log
.refname
, u
->refname
)) {
1514 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1515 tombstone
= &logs
[logs_nr
++];
1516 tombstone
->refname
= xstrdup(u
->refname
);
1517 tombstone
->value_type
= REFTABLE_LOG_DELETION
;
1518 tombstone
->update_index
= log
.update_index
;
1521 reftable_log_record_release(&log
);
1522 reftable_iterator_destroy(&it
);
1526 } else if (!(u
->flags
& REF_SKIP_CREATE_REFLOG
) &&
1527 (u
->flags
& REF_HAVE_NEW
) &&
1528 (u
->flags
& REF_FORCE_CREATE_REFLOG
||
1529 should_write_log(arg
->refs
, u
->refname
))) {
1530 struct reftable_log_record
*log
;
1531 int create_reflog
= 1;
1533 if (u
->new_target
) {
1534 if (!refs_resolve_ref_unsafe(&arg
->refs
->base
, u
->new_target
,
1535 RESOLVE_REF_READING
, &u
->new_oid
, NULL
)) {
1537 * TODO: currently we skip creating reflogs for dangling
1538 * symref updates. It would be nice to capture this as
1539 * zero oid updates however.
1545 if (create_reflog
) {
1546 struct ident_split c
;
1548 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1549 log
= &logs
[logs_nr
++];
1550 memset(log
, 0, sizeof(*log
));
1552 if (u
->committer_info
) {
1553 if (split_ident_line(&c
, u
->committer_info
,
1554 strlen(u
->committer_info
)))
1555 BUG("failed splitting committer info");
1557 c
= committer_ident
;
1560 fill_reftable_log_record(log
, &c
);
1563 * Updates are sorted by the writer. So updates for the same
1564 * refname need to contain different update indices.
1566 log
->update_index
= ts
+ u
->index
;
1568 log
->refname
= xstrdup(u
->refname
);
1569 memcpy(log
->value
.update
.new_hash
,
1570 u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1571 memcpy(log
->value
.update
.old_hash
,
1572 tx_update
->current_oid
.hash
, GIT_MAX_RAWSZ
);
1573 log
->value
.update
.message
=
1574 xstrndup(u
->msg
, arg
->refs
->write_options
.block_size
/ 2);
1578 if (u
->flags
& REF_LOG_ONLY
)
1581 if (u
->new_target
) {
1582 struct reftable_ref_record ref
= {
1583 .refname
= (char *)u
->refname
,
1584 .value_type
= REFTABLE_REF_SYMREF
,
1585 .value
.symref
= (char *)u
->new_target
,
1589 ret
= reftable_writer_add_ref(writer
, &ref
);
1592 } else if ((u
->flags
& REF_HAVE_NEW
) && ref_update_has_null_new_value(u
)) {
1593 struct reftable_ref_record ref
= {
1594 .refname
= (char *)u
->refname
,
1596 .value_type
= REFTABLE_REF_DELETION
,
1599 ret
= reftable_writer_add_ref(writer
, &ref
);
1602 } else if (u
->flags
& REF_HAVE_NEW
) {
1603 struct reftable_ref_record ref
= {0};
1604 struct object_id peeled
;
1607 ref
.refname
= (char *)u
->refname
;
1608 ref
.update_index
= ts
;
1610 peel_error
= peel_object(arg
->refs
->base
.repo
, &u
->new_oid
, &peeled
);
1612 ref
.value_type
= REFTABLE_REF_VAL2
;
1613 memcpy(ref
.value
.val2
.target_value
, peeled
.hash
, GIT_MAX_RAWSZ
);
1614 memcpy(ref
.value
.val2
.value
, u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1615 } else if (!is_null_oid(&u
->new_oid
)) {
1616 ref
.value_type
= REFTABLE_REF_VAL1
;
1617 memcpy(ref
.value
.val1
, u
->new_oid
.hash
, GIT_MAX_RAWSZ
);
1620 ret
= reftable_writer_add_ref(writer
, &ref
);
1627 * Logs are written at the end so that we do not have intermixed ref
1631 ret
= reftable_writer_add_logs(writer
, logs
, logs_nr
);
1637 assert(ret
!= REFTABLE_API_ERROR
);
1638 for (i
= 0; i
< logs_nr
; i
++)
1639 reftable_log_record_release(&logs
[i
]);
1644 static int reftable_be_transaction_finish(struct ref_store
*ref_store UNUSED
,
1645 struct ref_transaction
*transaction
,
1648 struct reftable_transaction_data
*tx_data
= transaction
->backend_data
;
1651 for (size_t i
= 0; i
< tx_data
->args_nr
; i
++) {
1652 tx_data
->args
[i
].max_index
= transaction
->max_index
;
1654 ret
= reftable_addition_add(tx_data
->args
[i
].addition
,
1655 write_transaction_table
, &tx_data
->args
[i
]);
1659 ret
= reftable_addition_commit(tx_data
->args
[i
].addition
);
1665 assert(ret
!= REFTABLE_API_ERROR
);
1666 free_transaction_data(tx_data
);
1667 transaction
->state
= REF_TRANSACTION_CLOSED
;
1670 strbuf_addf(err
, _("reftable: transaction failure: %s"),
1671 reftable_error_str(ret
));
1677 static int reftable_be_pack_refs(struct ref_store
*ref_store
,
1678 struct pack_refs_opts
*opts
)
1680 struct reftable_ref_store
*refs
=
1681 reftable_be_downcast(ref_store
, REF_STORE_WRITE
| REF_STORE_ODB
, "pack_refs");
1682 struct reftable_stack
*stack
;
1688 stack
= refs
->worktree_backend
.stack
;
1690 stack
= refs
->main_backend
.stack
;
1692 if (opts
->flags
& PACK_REFS_AUTO
)
1693 ret
= reftable_stack_auto_compact(stack
);
1695 ret
= reftable_stack_compact_all(stack
, NULL
);
1697 ret
= error(_("unable to compact stack: %s"),
1698 reftable_error_str(ret
));
1702 ret
= reftable_stack_clean(stack
);
1710 struct write_create_symref_arg
{
1711 struct reftable_ref_store
*refs
;
1712 struct reftable_stack
*stack
;
1714 const char *refname
;
1719 struct write_copy_arg
{
1720 struct reftable_ref_store
*refs
;
1721 struct reftable_backend
*be
;
1722 const char *oldname
;
1723 const char *newname
;
1728 static int write_copy_table(struct reftable_writer
*writer
, void *cb_data
)
1730 struct write_copy_arg
*arg
= cb_data
;
1731 uint64_t deletion_ts
, creation_ts
;
1732 struct reftable_ref_record old_ref
= {0}, refs
[2] = {0};
1733 struct reftable_log_record old_log
= {0}, *logs
= NULL
;
1734 struct reftable_iterator it
= {0};
1735 struct string_list skip
= STRING_LIST_INIT_NODUP
;
1736 struct ident_split committer_ident
= {0};
1737 struct strbuf errbuf
= STRBUF_INIT
;
1738 size_t logs_nr
= 0, logs_alloc
= 0, i
;
1739 const char *committer_info
;
1742 committer_info
= git_committer_info(0);
1743 if (split_ident_line(&committer_ident
, committer_info
, strlen(committer_info
)))
1744 BUG("failed splitting committer info");
1746 if (reftable_stack_read_ref(arg
->be
->stack
, arg
->oldname
, &old_ref
)) {
1747 ret
= error(_("refname %s not found"), arg
->oldname
);
1750 if (old_ref
.value_type
== REFTABLE_REF_SYMREF
) {
1751 ret
= error(_("refname %s is a symbolic ref, copying it is not supported"),
1757 * There's nothing to do in case the old and new name are the same, so
1758 * we exit early in that case.
1760 if (!strcmp(arg
->oldname
, arg
->newname
)) {
1766 * Verify that the new refname is available.
1768 if (arg
->delete_old
)
1769 string_list_insert(&skip
, arg
->oldname
);
1770 ret
= refs_verify_refname_available(&arg
->refs
->base
, arg
->newname
,
1771 NULL
, &skip
, 0, &errbuf
);
1773 error("%s", errbuf
.buf
);
1778 * When deleting the old reference we have to use two update indices:
1779 * once to delete the old ref and its reflog, and once to create the
1780 * new ref and its reflog. They need to be staged with two separate
1781 * indices because the new reflog needs to encode both the deletion of
1782 * the old branch and the creation of the new branch, and we cannot do
1783 * two changes to a reflog in a single update.
1785 deletion_ts
= creation_ts
= reftable_stack_next_update_index(arg
->be
->stack
);
1786 if (arg
->delete_old
)
1788 ret
= reftable_writer_set_limits(writer
, deletion_ts
, creation_ts
);
1793 * Add the new reference. If this is a rename then we also delete the
1797 refs
[0].refname
= xstrdup(arg
->newname
);
1798 refs
[0].update_index
= creation_ts
;
1799 if (arg
->delete_old
) {
1800 refs
[1].refname
= xstrdup(arg
->oldname
);
1801 refs
[1].value_type
= REFTABLE_REF_DELETION
;
1802 refs
[1].update_index
= deletion_ts
;
1804 ret
= reftable_writer_add_refs(writer
, refs
, arg
->delete_old
? 2 : 1);
1809 * When deleting the old branch we need to create a reflog entry on the
1810 * new branch name that indicates that the old branch has been deleted
1811 * and then recreated. This is a tad weird, but matches what the files
1814 if (arg
->delete_old
) {
1815 struct strbuf head_referent
= STRBUF_INIT
;
1816 struct object_id head_oid
;
1817 int append_head_reflog
;
1818 unsigned head_type
= 0;
1820 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1821 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1822 fill_reftable_log_record(&logs
[logs_nr
], &committer_ident
);
1823 logs
[logs_nr
].refname
= xstrdup(arg
->newname
);
1824 logs
[logs_nr
].update_index
= deletion_ts
;
1825 logs
[logs_nr
].value
.update
.message
=
1826 xstrndup(arg
->logmsg
, arg
->refs
->write_options
.block_size
/ 2);
1827 memcpy(logs
[logs_nr
].value
.update
.old_hash
, old_ref
.value
.val1
, GIT_MAX_RAWSZ
);
1830 ret
= reftable_backend_read_ref(arg
->be
, "HEAD", &head_oid
,
1831 &head_referent
, &head_type
);
1834 append_head_reflog
= (head_type
& REF_ISSYMREF
) && !strcmp(head_referent
.buf
, arg
->oldname
);
1835 strbuf_release(&head_referent
);
1838 * The files backend uses `refs_delete_ref()` to delete the old
1839 * branch name, which will append a reflog entry for HEAD in
1840 * case it points to the old branch.
1842 if (append_head_reflog
) {
1843 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1844 logs
[logs_nr
] = logs
[logs_nr
- 1];
1845 logs
[logs_nr
].refname
= xstrdup("HEAD");
1846 logs
[logs_nr
].value
.update
.name
=
1847 xstrdup(logs
[logs_nr
].value
.update
.name
);
1848 logs
[logs_nr
].value
.update
.email
=
1849 xstrdup(logs
[logs_nr
].value
.update
.email
);
1850 logs
[logs_nr
].value
.update
.message
=
1851 xstrdup(logs
[logs_nr
].value
.update
.message
);
1857 * Create the reflog entry for the newly created branch.
1859 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1860 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1861 fill_reftable_log_record(&logs
[logs_nr
], &committer_ident
);
1862 logs
[logs_nr
].refname
= xstrdup(arg
->newname
);
1863 logs
[logs_nr
].update_index
= creation_ts
;
1864 logs
[logs_nr
].value
.update
.message
=
1865 xstrndup(arg
->logmsg
, arg
->refs
->write_options
.block_size
/ 2);
1866 memcpy(logs
[logs_nr
].value
.update
.new_hash
, old_ref
.value
.val1
, GIT_MAX_RAWSZ
);
1870 * In addition to writing the reflog entry for the new branch, we also
1871 * copy over all log entries from the old reflog. Last but not least,
1872 * when renaming we also have to delete all the old reflog entries.
1874 ret
= reftable_stack_init_log_iterator(arg
->be
->stack
, &it
);
1878 ret
= reftable_iterator_seek_log(&it
, arg
->oldname
);
1883 ret
= reftable_iterator_next_log(&it
, &old_log
);
1886 if (ret
> 0 || strcmp(old_log
.refname
, arg
->oldname
)) {
1891 free(old_log
.refname
);
1894 * Copy over the old reflog entry with the new refname.
1896 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1897 logs
[logs_nr
] = old_log
;
1898 logs
[logs_nr
].refname
= xstrdup(arg
->newname
);
1902 * Delete the old reflog entry in case we are renaming.
1904 if (arg
->delete_old
) {
1905 ALLOC_GROW(logs
, logs_nr
+ 1, logs_alloc
);
1906 memset(&logs
[logs_nr
], 0, sizeof(logs
[logs_nr
]));
1907 logs
[logs_nr
].refname
= xstrdup(arg
->oldname
);
1908 logs
[logs_nr
].value_type
= REFTABLE_LOG_DELETION
;
1909 logs
[logs_nr
].update_index
= old_log
.update_index
;
1914 * Transfer ownership of the log record we're iterating over to
1915 * the array of log records. Otherwise, the pointers would get
1916 * free'd or reallocated by the iterator.
1918 memset(&old_log
, 0, sizeof(old_log
));
1921 ret
= reftable_writer_add_logs(writer
, logs
, logs_nr
);
1926 assert(ret
!= REFTABLE_API_ERROR
);
1927 reftable_iterator_destroy(&it
);
1928 string_list_clear(&skip
, 0);
1929 strbuf_release(&errbuf
);
1930 for (i
= 0; i
< logs_nr
; i
++)
1931 reftable_log_record_release(&logs
[i
]);
1933 for (i
= 0; i
< ARRAY_SIZE(refs
); i
++)
1934 reftable_ref_record_release(&refs
[i
]);
1935 reftable_ref_record_release(&old_ref
);
1936 reftable_log_record_release(&old_log
);
1940 static int reftable_be_rename_ref(struct ref_store
*ref_store
,
1941 const char *oldrefname
,
1942 const char *newrefname
,
1945 struct reftable_ref_store
*refs
=
1946 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "rename_ref");
1947 struct write_copy_arg arg
= {
1949 .oldname
= oldrefname
,
1950 .newname
= newrefname
,
1960 ret
= backend_for(&arg
.be
, refs
, newrefname
, &newrefname
, 1);
1963 ret
= reftable_stack_add(arg
.be
->stack
, &write_copy_table
, &arg
);
1966 assert(ret
!= REFTABLE_API_ERROR
);
1970 static int reftable_be_copy_ref(struct ref_store
*ref_store
,
1971 const char *oldrefname
,
1972 const char *newrefname
,
1975 struct reftable_ref_store
*refs
=
1976 reftable_be_downcast(ref_store
, REF_STORE_WRITE
, "copy_ref");
1977 struct write_copy_arg arg
= {
1979 .oldname
= oldrefname
,
1980 .newname
= newrefname
,
1989 ret
= backend_for(&arg
.be
, refs
, newrefname
, &newrefname
, 1);
1992 ret
= reftable_stack_add(arg
.be
->stack
, &write_copy_table
, &arg
);
1995 assert(ret
!= REFTABLE_API_ERROR
);
1999 struct reftable_reflog_iterator
{
2000 struct ref_iterator base
;
2001 struct reftable_ref_store
*refs
;
2002 struct reftable_iterator iter
;
2003 struct reftable_log_record log
;
2004 struct strbuf last_name
;
2008 static int reftable_reflog_iterator_advance(struct ref_iterator
*ref_iterator
)
2010 struct reftable_reflog_iterator
*iter
=
2011 (struct reftable_reflog_iterator
*)ref_iterator
;
2013 while (!iter
->err
) {
2014 iter
->err
= reftable_iterator_next_log(&iter
->iter
, &iter
->log
);
2019 * We want the refnames that we have reflogs for, so we skip if
2020 * we've already produced this name. This could be faster by
2021 * seeking directly to reflog@update_index==0.
2023 if (!strcmp(iter
->log
.refname
, iter
->last_name
.buf
))
2026 if (check_refname_format(iter
->log
.refname
,
2027 REFNAME_ALLOW_ONELEVEL
))
2030 strbuf_reset(&iter
->last_name
);
2031 strbuf_addstr(&iter
->last_name
, iter
->log
.refname
);
2032 iter
->base
.refname
= iter
->log
.refname
;
2044 static int reftable_reflog_iterator_seek(struct ref_iterator
*ref_iterator UNUSED
,
2045 const char *prefix UNUSED
)
2047 BUG("reftable reflog iterator cannot be seeked");
2051 static int reftable_reflog_iterator_peel(struct ref_iterator
*ref_iterator UNUSED
,
2052 struct object_id
*peeled UNUSED
)
2054 BUG("reftable reflog iterator cannot be peeled");
2058 static void reftable_reflog_iterator_release(struct ref_iterator
*ref_iterator
)
2060 struct reftable_reflog_iterator
*iter
=
2061 (struct reftable_reflog_iterator
*)ref_iterator
;
2062 reftable_log_record_release(&iter
->log
);
2063 reftable_iterator_destroy(&iter
->iter
);
2064 strbuf_release(&iter
->last_name
);
2067 static struct ref_iterator_vtable reftable_reflog_iterator_vtable
= {
2068 .advance
= reftable_reflog_iterator_advance
,
2069 .seek
= reftable_reflog_iterator_seek
,
2070 .peel
= reftable_reflog_iterator_peel
,
2071 .release
= reftable_reflog_iterator_release
,
2074 static struct reftable_reflog_iterator
*reflog_iterator_for_stack(struct reftable_ref_store
*refs
,
2075 struct reftable_stack
*stack
)
2077 struct reftable_reflog_iterator
*iter
;
2080 iter
= xcalloc(1, sizeof(*iter
));
2081 base_ref_iterator_init(&iter
->base
, &reftable_reflog_iterator_vtable
);
2082 strbuf_init(&iter
->last_name
, 0);
2089 ret
= reftable_stack_reload(stack
);
2093 ret
= reftable_stack_init_log_iterator(stack
, &iter
->iter
);
2097 ret
= reftable_iterator_seek_log(&iter
->iter
, "");
static struct ref_iterator *reftable_be_reflog_iterator_begin(struct ref_store *ref_store)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_iterator_begin");
	struct reftable_reflog_iterator *main_iter, *worktree_iter;

	main_iter = reflog_iterator_for_stack(refs, refs->main_backend.stack);
	if (!refs->worktree_backend.stack)
		return &main_iter->base;

	worktree_iter = reflog_iterator_for_stack(refs, refs->worktree_backend.stack);

	return merge_ref_iterator_begin(&worktree_iter->base, &main_iter->base,
					ref_iterator_select, NULL);
}

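/*
 * Convert a single reftable log record into the arguments expected by an
 * each_reflog_ent_fn callback. Returns 0 without invoking the callback when
 * the record is merely the reflog existence marker.
 */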
static int yield_log_record(struct reftable_ref_store *refs,
			    struct reftable_log_record *log,
			    each_reflog_ent_fn fn,
			    void *cb_data)
{
	struct object_id old_oid, new_oid;
	const char *full_committer;

	oidread(&old_oid, log->value.update.old_hash, refs->base.repo->hash_algo);
	oidread(&new_oid, log->value.update.new_hash, refs->base.repo->hash_algo);

	/*
	 * When both the old object ID and the new object ID are null
	 * then this is the reflog existence marker. The caller must
	 * not be aware of it.
	 */
	if (is_null_oid(&old_oid) && is_null_oid(&new_oid))
		return 0;

	full_committer = fmt_ident(log->value.update.name, log->value.update.email,
				   WANT_COMMITTER_IDENT, NULL, IDENT_NO_DATE);
	return fn(&old_oid, &new_oid, full_committer,
		  log->value.update.time, log->value.update.tz_offset,
		  log->value.update.message, cb_data);
}

static int reftable_be_for_each_reflog_ent_reverse(struct ref_store *ref_store,
						   const char *refname,
						   each_reflog_ent_fn fn,
						   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent_reverse");
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	struct reftable_backend *be;
	int ret;

	/*
	 * TODO: we should adapt this callsite to reload the stack. There is no
	 * obvious reason why we shouldn't.
	 */
	ret = backend_for(&be, refs, refname, &refname, 0);
	if (ret)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, refname);
	while (!ret) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, refname)) {
			ret = 0;
			break;
		}

		ret = yield_log_record(refs, &log, fn, cb_data);
		if (ret)
			break;
	}

done:
	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}

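/*
 * Reftable stores log records newest-first, so iterating in chronological
 * order requires buffering all records for the ref and replaying them in
 * reverse.
 */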
static int reftable_be_for_each_reflog_ent(struct ref_store *ref_store,
					   const char *refname,
					   each_reflog_ent_fn fn,
					   void *cb_data)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "for_each_reflog_ent");
	struct reftable_log_record *logs = NULL;
	struct reftable_iterator it = {0};
	struct reftable_backend *be;
	size_t logs_alloc = 0, logs_nr = 0, i;
	int ret;

	/*
	 * TODO: we should adapt this callsite to reload the stack. There is no
	 * obvious reason why we shouldn't.
	 */
	ret = backend_for(&be, refs, refname, &refname, 0);
	if (ret)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, refname);
	while (!ret) {
		struct reftable_log_record log = {0};

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			ret = 0;
			break;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	for (i = logs_nr; i--;) {
		ret = yield_log_record(refs, &logs[i], fn, cb_data);
		if (ret)
			goto done;
	}

done:
	reftable_iterator_destroy(&it);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	return ret;
}

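/*
 * A reflog exists if the stack contains at least one log record for the
 * refname, which includes the bare existence marker written by
 * reftable_be_create_reflog().
 */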
static int reftable_be_reflog_exists(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct reftable_log_record log = {0};
	struct reftable_iterator it = {0};
	struct reftable_backend *be;
	int ret;

	ret = backend_for(&be, refs, refname, &refname, 1);
	if (ret < 0)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, refname);
	if (ret < 0)
		goto done;

	/*
	 * Check whether we get at least one log record for the given ref name.
	 * If so, the reflog exists, otherwise it doesn't.
	 */
	ret = reftable_iterator_next_log(&it, &log);
	if (ret < 0)
		goto done;
	if (ret > 0) {
		ret = 0;
		goto done;
	}

	ret = strcmp(log.refname, refname) == 0;

done:
	reftable_iterator_destroy(&it);
	reftable_log_record_release(&log);
	return ret;
}

struct write_reflog_existence_arg {
	struct reftable_ref_store *refs;
	const char *refname;
	struct reftable_stack *stack;
};

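/*
 * Writer callback used by reftable_be_create_reflog(): if the ref has no log
 * records yet, add a single marker entry whose old and new object IDs are
 * both null so that the reflog is considered to exist.
 */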
static int write_reflog_existence_table(struct reftable_writer *writer,
					void *cb_data)
{
	struct write_reflog_existence_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	struct reftable_log_record log = {0};
	int ret;

	ret = reftable_stack_read_log(arg->stack, arg->refname, &log);
	if (ret <= 0)
		goto done;

	ret = reftable_writer_set_limits(writer, ts, ts);
	if (ret < 0)
		goto done;

	/*
	 * The existence entry has both old and new object ID set to the
	 * null object ID. Our iterators are aware of this and will not present
	 * them to their callers.
	 */
	log.refname = xstrdup(arg->refname);
	log.update_index = ts;
	log.value_type = REFTABLE_LOG_UPDATE;
	ret = reftable_writer_add_log(writer, &log);

done:
	assert(ret != REFTABLE_API_ERROR);
	reftable_log_record_release(&log);
	return ret;
}

static int reftable_be_create_reflog(struct ref_store *ref_store,
				     const char *refname,
				     struct strbuf *errmsg UNUSED)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	struct reftable_backend *be;
	struct write_reflog_existence_arg arg = {
		.refs = refs,
		.refname = refname,
	};
	int ret;

	ret = backend_for(&be, refs, refname, &refname, 1);
	if (ret)
		goto done;
	arg.stack = be->stack;

	ret = reftable_stack_add(be->stack, &write_reflog_existence_table, &arg);

done:
	return ret;
}

struct write_reflog_delete_arg {
	struct reftable_stack *stack;
	const char *refname;
};

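/*
 * Writer callback used by reftable_be_delete_reflog(): emit one deletion
 * tombstone per existing log record of the ref, as the format has no marker
 * for dropping a whole reflog in one go.
 */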
static int write_reflog_delete_table(struct reftable_writer *writer, void *cb_data)
{
	struct write_reflog_delete_arg *arg = cb_data;
	struct reftable_log_record log = {0}, tombstone = {0};
	struct reftable_iterator it = {0};
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	int ret;

	ret = reftable_writer_set_limits(writer, ts, ts);
	if (ret < 0)
		goto out;

	ret = reftable_stack_init_log_iterator(arg->stack, &it);
	if (ret < 0)
		goto out;

	/*
	 * In order to delete a reflog we need to delete all of its entries one
	 * by one. This is inefficient, but the reftable format does not have a
	 * better deletion marker right now.
	 */
	ret = reftable_iterator_seek_log(&it, arg->refname);
	while (!ret) {
		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			break;
		if (ret > 0 || strcmp(log.refname, arg->refname)) {
			ret = 0;
			break;
		}

		tombstone.refname = (char *)arg->refname;
		tombstone.value_type = REFTABLE_LOG_DELETION;
		tombstone.update_index = log.update_index;

		ret = reftable_writer_add_log(writer, &tombstone);
	}

out:
	reftable_log_record_release(&log);
	reftable_iterator_destroy(&it);
	return ret;
}

static int reftable_be_delete_reflog(struct ref_store *ref_store,
				     const char *refname)
{
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
	struct reftable_backend *be;
	struct write_reflog_delete_arg arg = {
		.refname = refname,
	};
	int ret;

	ret = backend_for(&be, refs, refname, &refname, 1);
	if (ret)
		goto done;
	arg.stack = be->stack;

	ret = reftable_stack_add(be->stack, &write_reflog_delete_table, &arg);

done:
	assert(ret != REFTABLE_API_ERROR);
	return ret;
}

struct reflog_expiry_arg {
	struct reftable_ref_store *refs;
	struct reftable_stack *stack;
	struct reftable_log_record *records;
	struct object_id update_oid;
	const char *refname;
	size_t len;
};

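/*
 * Writer callback used by reftable_be_reflog_expire(): write the rewritten
 * log records prepared there, optionally rewrite the ref itself
 * (EXPIRE_REFLOGS_UPDATE_REF), and re-add the existence marker when no live
 * records remain.
 */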
static int write_reflog_expiry_table(struct reftable_writer *writer, void *cb_data)
{
	struct reflog_expiry_arg *arg = cb_data;
	uint64_t ts = reftable_stack_next_update_index(arg->stack);
	uint64_t live_records = 0;
	size_t i;
	int ret;

	for (i = 0; i < arg->len; i++)
		if (arg->records[i].value_type == REFTABLE_LOG_UPDATE)
			live_records++;

	ret = reftable_writer_set_limits(writer, ts, ts);
	if (ret < 0)
		return ret;

	if (!is_null_oid(&arg->update_oid)) {
		struct reftable_ref_record ref = {0};
		struct object_id peeled;

		ref.refname = (char *)arg->refname;
		ref.update_index = ts;

		if (!peel_object(arg->refs->base.repo, &arg->update_oid, &peeled)) {
			ref.value_type = REFTABLE_REF_VAL2;
			memcpy(ref.value.val2.target_value, peeled.hash, GIT_MAX_RAWSZ);
			memcpy(ref.value.val2.value, arg->update_oid.hash, GIT_MAX_RAWSZ);
		} else {
			ref.value_type = REFTABLE_REF_VAL1;
			memcpy(ref.value.val1, arg->update_oid.hash, GIT_MAX_RAWSZ);
		}

		ret = reftable_writer_add_ref(writer, &ref);
		if (ret < 0)
			return ret;
	}

	/*
	 * When there are no more entries left in the reflog we empty it
	 * completely, but write a placeholder reflog entry that indicates that
	 * the reflog still exists.
	 */
	if (!live_records) {
		struct reftable_log_record log = {
			.refname = (char *)arg->refname,
			.value_type = REFTABLE_LOG_UPDATE,
			.update_index = ts,
		};

		ret = reftable_writer_add_log(writer, &log);
		if (ret)
			return ret;
	}

	for (i = 0; i < arg->len; i++) {
		ret = reftable_writer_add_log(writer, &arg->records[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reftable_be_reflog_expire(struct ref_store *ref_store,
				     const char *refname,
				     unsigned int flags,
				     reflog_expiry_prepare_fn prepare_fn,
				     reflog_expiry_should_prune_fn should_prune_fn,
				     reflog_expiry_cleanup_fn cleanup_fn,
				     void *policy_cb_data)
{
	/*
	 * For log expiry, we write tombstones for every single reflog entry
	 * that is to be expired. This means that the entries are still
	 * retrievable by delving into the stack, and expiring entries
	 * paradoxically takes extra memory. This memory is only reclaimed when
	 * compacting the reftable stack.
	 *
	 * It would be better if the refs backend supported an API that sets a
	 * criterion for all refs, passing the criterion to pack_refs().
	 *
	 * On the plus side, because we do the expiration per ref, we can easily
	 * insert the reflog existence dummies.
	 */
	struct reftable_ref_store *refs =
		reftable_be_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
	struct reftable_log_record *logs = NULL;
	struct reftable_log_record *rewritten = NULL;
	struct reftable_iterator it = {0};
	struct reftable_addition *add = NULL;
	struct reflog_expiry_arg arg = {0};
	struct reftable_backend *be;
	struct object_id oid = {0};
	struct strbuf referent = STRBUF_INIT;
	uint8_t *last_hash = NULL;
	size_t logs_nr = 0, logs_alloc = 0, i;
	unsigned int type = 0;
	int ret;

	ret = backend_for(&be, refs, refname, &refname, 1);
	if (ret < 0)
		goto done;

	ret = reftable_stack_init_log_iterator(be->stack, &it);
	if (ret < 0)
		goto done;

	ret = reftable_iterator_seek_log(&it, refname);
	if (ret < 0)
		goto done;

	ret = reftable_stack_new_addition(&add, be->stack, 0);
	if (ret < 0)
		goto done;

	ret = reftable_backend_read_ref(be, refname, &oid, &referent, &type);
	if (ret < 0)
		goto done;
	prepare_fn(refname, &oid, policy_cb_data);

	while (1) {
		struct reftable_log_record log = {0};
		struct object_id old_oid, new_oid;

		ret = reftable_iterator_next_log(&it, &log);
		if (ret < 0)
			goto done;
		if (ret > 0 || strcmp(log.refname, refname)) {
			reftable_log_record_release(&log);
			break;
		}

		oidread(&old_oid, log.value.update.old_hash,
			ref_store->repo->hash_algo);
		oidread(&new_oid, log.value.update.new_hash,
			ref_store->repo->hash_algo);

		/*
		 * Skip over the reflog existence marker. We will add it back
		 * in when there are no live reflog records.
		 */
		if (is_null_oid(&old_oid) && is_null_oid(&new_oid)) {
			reftable_log_record_release(&log);
			continue;
		}

		ALLOC_GROW(logs, logs_nr + 1, logs_alloc);
		logs[logs_nr++] = log;
	}

	/*
	 * We need to rewrite all reflog entries according to the pruning
	 * callback function:
	 *
	 *   - If a reflog entry shall be pruned we mark the record for
	 *     deletion.
	 *
	 *   - Otherwise we may have to rewrite the chain of reflog entries so
	 *     that gaps created by just-deleted records get backfilled.
	 */
	CALLOC_ARRAY(rewritten, logs_nr);
	for (i = logs_nr; i--;) {
		struct reftable_log_record *dest = &rewritten[i];
		struct object_id old_oid, new_oid;

		*dest = logs[i];
		oidread(&old_oid, logs[i].value.update.old_hash,
			ref_store->repo->hash_algo);
		oidread(&new_oid, logs[i].value.update.new_hash,
			ref_store->repo->hash_algo);

		if (should_prune_fn(&old_oid, &new_oid, logs[i].value.update.email,
				    (timestamp_t)logs[i].value.update.time,
				    logs[i].value.update.tz_offset,
				    logs[i].value.update.message,
				    policy_cb_data)) {
			dest->value_type = REFTABLE_LOG_DELETION;
		} else {
			if ((flags & EXPIRE_REFLOGS_REWRITE) && last_hash)
				memcpy(dest->value.update.old_hash, last_hash, GIT_MAX_RAWSZ);
			last_hash = logs[i].value.update.new_hash;
		}
	}

	if (flags & EXPIRE_REFLOGS_UPDATE_REF && last_hash && !is_null_oid(&oid))
		oidread(&arg.update_oid, last_hash, ref_store->repo->hash_algo);

	arg.refs = refs;
	arg.records = rewritten;
	arg.len = logs_nr;
	arg.stack = be->stack;
	arg.refname = refname;

	ret = reftable_addition_add(add, &write_reflog_expiry_table, &arg);
	if (ret < 0)
		goto done;

	/*
	 * Future improvement: we could skip writing records that were
	 * not changed.
	 */
	if (!(flags & EXPIRE_REFLOGS_DRY_RUN))
		ret = reftable_addition_commit(add);

done:
	cleanup_fn(policy_cb_data);
	assert(ret != REFTABLE_API_ERROR);

	reftable_iterator_destroy(&it);
	reftable_addition_destroy(add);
	for (i = 0; i < logs_nr; i++)
		reftable_log_record_release(&logs[i]);
	free(logs);
	free(rewritten);
	strbuf_release(&referent);
	return ret;
}

static int reftable_be_fsck(struct ref_store *ref_store UNUSED,
			    struct fsck_options *o UNUSED,
			    struct worktree *wt UNUSED)
{
	return 0;
}

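/*
 * Virtual method table that plugs the reftable backend into the generic
 * ref-store machinery.
 */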
struct ref_storage_be refs_be_reftable = {
	.name = "reftable",

	.init = reftable_be_init,
	.release = reftable_be_release,
	.create_on_disk = reftable_be_create_on_disk,
	.remove_on_disk = reftable_be_remove_on_disk,

	.transaction_prepare = reftable_be_transaction_prepare,
	.transaction_finish = reftable_be_transaction_finish,
	.transaction_abort = reftable_be_transaction_abort,

	.pack_refs = reftable_be_pack_refs,
	.rename_ref = reftable_be_rename_ref,
	.copy_ref = reftable_be_copy_ref,

	.iterator_begin = reftable_be_iterator_begin,
	.read_raw_ref = reftable_be_read_raw_ref,
	.read_symbolic_ref = reftable_be_read_symbolic_ref,

	.reflog_iterator_begin = reftable_be_reflog_iterator_begin,
	.for_each_reflog_ent = reftable_be_for_each_reflog_ent,
	.for_each_reflog_ent_reverse = reftable_be_for_each_reflog_ent_reverse,
	.reflog_exists = reftable_be_reflog_exists,
	.create_reflog = reftable_be_create_reflog,
	.delete_reflog = reftable_be_delete_reflog,
	.reflog_expire = reftable_be_reflog_expire,

	.fsck = reftable_be_fsck,
};