return refs->be->optimize(refs, opts);
}
+int refs_optimize_required(struct ref_store *refs,
+ struct refs_optimize_opts *opts,
+ bool *required)
+{
+ return refs->be->optimize_required(refs, opts, required);
+}
+
int reference_get_peeled_oid(struct repository *repo,
const struct reference *ref,
struct object_id *peeled_oid)
*/
int refs_optimize(struct ref_store *refs, struct refs_optimize_opts *opts);
+/*
+ * Check whether the ref backend would benefit from optimization via
+ * 'refs_optimize'. On success, return 0 and store the answer in
+ * '*required'; on failure, return a non-zero value and leave
+ * '*required' unspecified.
+ */
+int refs_optimize_required(struct ref_store *refs,
+ struct refs_optimize_opts *opts,
+ bool *required);
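+
+/*
+ * Illustrative usage sketch (not prescriptive; assumes an initialized
+ * 'struct repository *repo'):
+ *
+ *	struct refs_optimize_opts opts = { .flags = REFS_OPTIMIZE_AUTO };
+ *	bool required;
+ *
+ *	if (!refs_optimize_required(get_main_ref_store(repo), &opts, &required) &&
+ *	    required)
+ *		refs_optimize(get_main_ref_store(repo), &opts);
+ */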
+
/*
* Setup reflog before using. Fill in err and return -1 on failure.
*/
return res;
}
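+/*
+ * Trace wrapper: forward the query to the wrapped backend and log
+ * both the answer and the return code.
+ */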
+static int debug_optimize_required(struct ref_store *ref_store,
+ struct refs_optimize_opts *opts,
+ bool *required)
+{
+ struct debug_ref_store *drefs = (struct debug_ref_store *)ref_store;
+ int res = drefs->refs->be->optimize_required(drefs->refs, opts, required);
+ trace_printf_key(&trace_refs, "optimize_required: %s, res: %d\n",
+ (!res && *required) ? "yes" : "no", res);
+ return res;
+}
+
static int debug_rename_ref(struct ref_store *ref_store, const char *oldref,
const char *newref, const char *logmsg)
{
.transaction_abort = debug_transaction_abort,
.optimize = debug_optimize,
+ .optimize_required = debug_optimize_required,
+
.rename_ref = debug_rename_ref,
.copy_ref = debug_copy_ref,
return 0;
}
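+/*
+ * The files backend benefits from optimization whenever loose refs
+ * could be packed; delegate that decision to should_pack_refs().
+ */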
+static int files_optimize_required(struct ref_store *ref_store,
+ struct refs_optimize_opts *opts,
+ bool *required)
+{
+ struct files_ref_store *refs = files_downcast(ref_store, REF_STORE_READ,
+ "optimize_required");
+ *required = should_pack_refs(refs, opts);
+ return 0;
+}
+
/*
* People using contrib's git-new-workdir have .git/logs/refs ->
* /some/other/path/.git/logs/refs, and that may live on another device.
.transaction_abort = files_transaction_abort,
.optimize = files_optimize,
+ .optimize_required = files_optimize_required,
+
.rename_ref = files_rename_ref,
.copy_ref = files_copy_ref,
return 0;
}
+static int packed_optimize_required(struct ref_store *ref_store UNUSED,
+ struct refs_optimize_opts *opts UNUSED,
+ bool *required)
+{
+ /* Packed refs are already optimized. */
+ *required = false;
+ return 0;
+}
+
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store UNUSED)
{
return empty_ref_iterator_begin();
.transaction_abort = packed_transaction_abort,
.optimize = packed_optimize,
+ .optimize_required = packed_optimize_required,
+
.rename_ref = NULL,
.copy_ref = NULL,
typedef int optimize_fn(struct ref_store *ref_store,
struct refs_optimize_opts *opts);
+
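+/*
+ * Store in '*required' whether the 'optimize' callback would do any
+ * useful work for this backend. Return 0 on success, non-zero on
+ * failure.
+ */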
+typedef int optimize_required_fn(struct ref_store *ref_store,
+ struct refs_optimize_opts *opts,
+ bool *required);
+
typedef int rename_ref_fn(struct ref_store *ref_store,
const char *oldref, const char *newref,
const char *logmsg);
ref_transaction_abort_fn *transaction_abort;
optimize_fn *optimize;
+ optimize_required_fn *optimize_required;
rename_ref_fn *rename_ref;
copy_ref_fn *copy_ref;
return ret;
}
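+/*
+ * Prefer the worktree stack when present, falling back to the main
+ * stack. With REFS_OPTIMIZE_AUTO, the reftable library applies its
+ * auto-compaction heuristics when making the decision.
+ */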
+static int reftable_be_optimize_required(struct ref_store *ref_store,
+ struct refs_optimize_opts *opts,
+ bool *required)
+{
+ struct reftable_ref_store *refs = reftable_be_downcast(ref_store, REF_STORE_READ,
+ "optimize_refs_required");
+ struct reftable_stack *stack;
+ bool use_heuristics = false;
+
+ if (refs->err)
+ return refs->err;
+
+ stack = refs->worktree_backend.stack;
+ if (!stack)
+ stack = refs->main_backend.stack;
+
+ if (opts->flags & REFS_OPTIMIZE_AUTO)
+ use_heuristics = true;
+
+ return reftable_stack_compaction_required(stack, use_heuristics,
+ required);
+}
+
struct write_create_symref_arg {
struct reftable_ref_store *refs;
struct reftable_stack *stack;
.transaction_abort = reftable_be_transaction_abort,
.optimize = reftable_be_optimize,
+ .optimize_required = reftable_be_optimize_required,
+
.rename_ref = reftable_be_rename_ref,
.copy_ref = reftable_be_copy_ref,