{
int i;
- printf("* %d-way merge\n", o->merge_size);
+ printf("* %d-way merge\n", o->internal.merge_size);
debug_stage("index", stages[0], o);
- for (i = 1; i <= o->merge_size; i++) {
+ for (i = 1; i <= o->internal.merge_size; i++) {
char buf[24];
xsnprintf(buf, sizeof(buf), "ent#%d", i);
debug_stage(buf, stages[i], o);
OPT__DRY_RUN(&opts.dry_run, N_("don't update the index or the work tree")),
OPT_BOOL(0, "no-sparse-checkout", &opts.skip_sparse_checkout,
N_("skip applying sparse checkout filter")),
- OPT_BOOL(0, "debug-unpack", &opts.debug_unpack,
+ OPT_BOOL(0, "debug-unpack", &opts.internal.debug_unpack,
N_("debug unpack-trees")),
OPT_CALLBACK_F(0, "recurse-submodules", NULL,
"checkout", "control recursive updating of submodules",
opts.head_idx = 1;
}
- if (opts.debug_unpack)
+ if (opts.internal.debug_unpack)
opts.fn = debug_merge;
/* If we're going to prime_cache_tree later, skip cache tree update */
if (unpack_trees(nr_trees, t, &opts))
return 128;
- if (opts.debug_unpack || opts.dry_run)
+ if (opts.internal.debug_unpack || opts.dry_run)
return 0; /* do not write the index out */
/*
mark_ce_used(src[0], o);
}
free(tree_ce);
- if (o->debug_unpack)
+ if (o->internal.debug_unpack)
printf("Unpacked %d entries from %s to %s using cache-tree\n",
nr_entries,
o->src_index->cache[pos]->name,
while (!p->mode)
p++;
- if (o->debug_unpack)
+ if (o->internal.debug_unpack)
debug_unpack_callback(n, mask, dirmask, names, info);
/* Are we supposed to look at the index too? */
init_split_index(&o->internal.result);
}
oidcpy(&o->internal.result.oid, &o->src_index->oid);
- o->merge_size = len;
+ o->internal.merge_size = len;
mark_all_ce_unused(o->src_index);
o->internal.result.fsmonitor_last_update =
const struct cache_entry *oldtree = src[1];
const struct cache_entry *newtree = src[2];
- if (o->merge_size != 2)
+ if (o->internal.merge_size != 2)
return error("Cannot do a twoway merge of %d trees",
- o->merge_size);
+ o->internal.merge_size);
if (oldtree == o->df_conflict_entry)
oldtree = NULL;
const struct cache_entry *old = src[0];
const struct cache_entry *a = src[1];
- if (o->merge_size != 1)
+ if (o->internal.merge_size != 1)
return error("Cannot do a bind merge of %d trees",
- o->merge_size);
+ o->internal.merge_size);
if (a && old)
return o->quiet ? -1 :
error(ERRORMSG(o, ERROR_BIND_OVERLAP),
const struct cache_entry *old = src[0];
const struct cache_entry *a = src[1];
- if (o->merge_size != 1)
+ if (o->internal.merge_size != 1)
return error("Cannot do a oneway merge of %d trees",
- o->merge_size);
+ o->internal.merge_size);
if (!a || a == o->df_conflict_entry)
return deleted_entry(old, old, o);
const struct cache_entry *worktree = src[1];
const struct cache_entry *untracked = src[2];
- if (o->merge_size != 2)
- BUG("invalid merge_size: %d", o->merge_size);
+ if (o->internal.merge_size != 2)
+ BUG("invalid merge_size: %d", o->internal.merge_size);
if (worktree && untracked)
return error(_("worktree and untracked commit have duplicate entries: %s"),