Conversion from uchar[20] to struct object_id continues.
* bc/object-id: (42 commits)
merge-one-file: compute empty blob object ID
add--interactive: compute the empty tree value
Update shell scripts to compute empty tree object ID
sha1_file: only expose empty object constants through git_hash_algo
dir: use the_hash_algo for empty blob object ID
sequencer: use the_hash_algo for empty tree object ID
cache-tree: use is_empty_tree_oid
sha1_file: convert cached object code to struct object_id
builtin/reset: convert use of EMPTY_TREE_SHA1_BIN
builtin/receive-pack: convert one use of EMPTY_TREE_SHA1_HEX
wt-status: convert two uses of EMPTY_TREE_SHA1_HEX
submodule: convert several uses of EMPTY_TREE_SHA1_HEX
sequencer: convert one use of EMPTY_TREE_SHA1_HEX
merge: convert empty tree constant to the_hash_algo
builtin/merge: switch tree functions to use object_id
builtin/am: convert uses of EMPTY_TREE_SHA1_BIN to the_hash_algo
sha1-file: add functions for hex empty tree and blob OIDs
builtin/receive-pack: avoid hard-coded constants for push certs
diff: specify abbreviation size in terms of the_hash_algo
upload-pack: replace use of several hard-coded constants
...
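
Every hunk below follows the same recipe: bare unsigned char [20] buffers become struct object_id, and hard-coded 20/40 lengths become queries against the_hash_algo. A minimal compilable sketch of that shape, with toy stand-ins for Git's real types in hash.h (which carry more fields; SHA-256 support itself landed later):

    #include <stdio.h>
    #include <string.h>

    #define GIT_MAX_RAWSZ 32    /* big enough for SHA-256 */

    struct object_id {
        unsigned char hash[GIT_MAX_RAWSZ];
    };

    struct git_hash_algo {
        const char *name;
        size_t rawsz;   /* binary digest size: 20 (SHA-1) or 32 (SHA-256) */
        size_t hexsz;   /* hex digest size: 40 or 64 */
    };

    static const struct git_hash_algo algos[] = {
        { "sha1", 20, 40 },
        { "sha256", 32, 64 },
    };

    /* Converted code asks the active algorithm instead of assuming 20. */
    static int oid_equal(const struct git_hash_algo *algo,
                         const struct object_id *a,
                         const struct object_id *b)
    {
        return !memcmp(a->hash, b->hash, algo->rawsz);
    }

    int main(void)
    {
        struct object_id a = { { 0 } }, b = { { 0 } };
        printf("%d %d\n", oid_equal(&algos[0], &a, &b),
               oid_equal(&algos[1], &a, &b));
        return 0;
    }
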
enum object_type type;
void *buf;
struct git_istream *st = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
if (!usable_delta) {
- if (entry->type == OBJ_BLOB &&
- entry->size > big_file_threshold &&
+ if (oe_type(entry) == OBJ_BLOB &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
(st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
- hdrlen += 20;
- hashwrite(f, entry->delta->idx.oid.hash, hashsz);
++ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
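
Aside from the hash-size change, the condition now goes through oe_type() and oe_size_greater_than(). Those accessors exist because struct object_entry was packed into bitfields, with an out-of-band table for sizes too large for the field, so direct field reads had to become helper calls. A rough sketch of the idea, with made-up widths and names (Git's actual layout differs):

    #include <stdio.h>

    #define SIZE_BITS 31

    struct object_entry {
        unsigned size_ : SIZE_BITS;
        unsigned size_valid_ : 1;  /* 0: look the size up out of band */
    };

    static unsigned long fallback_size; /* stand-in for the overflow table */

    static int oe_size_greater_than_toy(const struct object_entry *e,
                                        unsigned long limit)
    {
        unsigned long size = e->size_valid_ ? e->size_ : fallback_size;
        return size > limit;
    }

    int main(void)
    {
        struct object_entry e = { 1024, 1 };
        printf("%d\n", oe_size_greater_than_toy(&e, 512)); /* 1 */
        return 0;
    }
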
unsigned char header[MAX_PACK_OBJECT_HEADER],
dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ unsigned long entry_size = SIZE(entry);
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ if (DELTA(entry))
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
hdrlen = encode_in_pack_object_header(header, sizeof(header),
- type, entry->size);
+ type, entry_size);
offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
- hdrlen += 20;
- hashwrite(f, entry->delta->idx.oid.hash, hashsz);
++ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
reused_delta++;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
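
In both write paths the split check charges hashsz bytes for the pack's trailing checksum, which is one binary hash, so the same code stays correct when rawsz is 32 instead of 20. Worked numbers, using a hypothetical fits_in_pack() helper that mirrors the condition in the hunks:

    #include <stdio.h>

    /* An object only fits in the current pack if header, data, and the
     * trailing checksum (rawsz bytes) all stay under the split limit. */
    static int fits_in_pack(unsigned long limit, unsigned long hdrlen,
                            unsigned long datalen, unsigned rawsz)
    {
        return !limit || hdrlen + datalen + rawsz < limit;
    }

    int main(void)
    {
        /* 12 + 960 + 20 = 992 fits under 1000; 12 + 960 + 32 = 1004 does not. */
        printf("sha1: %d\n", fits_in_pack(1000, 12, 960, 20));
        printf("sha256: %d\n", fits_in_pack(1000, 12, 960, 32));
        return 0;
    }
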
return 0;
/* Now some size filtering heuristics. */
- trg_size = trg_entry->size;
- if (!trg_entry->delta) {
+ trg_size = SIZE(trg_entry);
+ if (!DELTA(trg_entry)) {
- max_size = trg_size/2 - 20;
+ max_size = trg_size/2 - the_hash_algo->rawsz;
ref_depth = 1;
} else {
- max_size = trg_entry->delta_size;
+ max_size = DELTA_SIZE(trg_entry);
ref_depth = trg->depth;
}
max_size = (uint64_t)max_size * (max_depth - src->depth) /
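
The literal 20 in the old heuristic was really the cost of naming a delta base: a REF_DELTA spends one full binary object ID on it, so a first delta is only worth keeping when it saves at least half the object plus that overhead. A worked example, with a made-up helper name:

    #include <stdio.h>

    static unsigned long first_delta_max_size(unsigned long trg_size,
                                              unsigned rawsz)
    {
        return trg_size / 2 - rawsz;
    }

    int main(void)
    {
        printf("%lu\n", first_delta_max_size(4096, 20)); /* 2028 */
        printf("%lu\n", first_delta_max_size(4096, 32)); /* 2016 */
        return 0;
    }
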
if (tail <= bufptr + tree_entry_len + 1 || memcmp(bufptr, "tree ", 5) ||
bufptr[tree_entry_len] != '\n')
return error("bogus commit object %s", oid_to_hex(&item->object.oid));
- if (get_sha1_hex(bufptr + 5, parent.hash) < 0)
+ if (get_oid_hex(bufptr + 5, &parent) < 0)
return error("bad tree pointer in commit %s",
oid_to_hex(&item->object.oid));
- item->tree = lookup_tree(&parent);
+ item->maybe_tree = lookup_tree(&parent);
bufptr += tree_entry_len + 1; /* "tree " + "hex sha1" + "\n" */
pptr = &item->parents;
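
Swapping get_sha1_hex() for get_oid_hex() lets the same parser take 40- or 64-digit IDs; the surrounding sanity check only depends on tree_entry_len being 5 + hexsz. A self-contained toy of that validation (digit parsing itself is get_oid_hex()'s job):

    #include <stdio.h>
    #include <string.h>

    /* A commit buffer must start "tree <hex oid>\n"; one test covers
     * 40-digit SHA-1 and 64-digit SHA-256 IDs alike. */
    static int has_valid_tree_line(const char *buf, size_t len, unsigned hexsz)
    {
        size_t tree_entry_len = hexsz + 5;

        if (len < tree_entry_len + 1)
            return 0;
        return !memcmp(buf, "tree ", 5) && buf[tree_entry_len] == '\n';
    }

    int main(void)
    {
        const char *buf = "tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904\n";
        printf("%d\n", has_valid_tree_line(buf, strlen(buf), 40)); /* 1 */
        return 0;
    }
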
err = fsck_ident(&buffer, &commit->object, options);
if (err)
return err;
- if (!commit->tree) {
+ if (!get_commit_tree(commit)) {
- err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
+ err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", oid_to_hex(&tree_oid));
if (err)
return err;
}
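
fsck now reaches the tree through get_commit_tree() instead of dereferencing commit->tree; with every caller behind an accessor, the renamed maybe_tree field can be filled in lazily (Git does this from the commit-graph file). The pattern in miniature, with stub types; this toy only returns the cached pointer:

    #include <stdio.h>

    struct tree;

    struct commit {
        struct tree *maybe_tree; /* may be NULL until loaded */
    };

    static struct tree *get_commit_tree_toy(const struct commit *c)
    {
        /* A real version would try to load the tree here. */
        return c->maybe_tree;
    }

    int main(void)
    {
        struct commit c = { NULL };
        if (!get_commit_tree_toy(&c))
            printf("could not load commit's tree\n");
        return 0;
    }
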
goto out;
}
- if (getenv("GIT_TEST_SPLIT_INDEX")) {
+ if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
- int v = si->base_sha1[0];
+ int v = si->base_oid.hash[0];
if ((v & 15) < 6)
istate->cache_changed |= SPLIT_INDEX_ORDERED;
}
}
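
The old getenv() test counted any set value, even false, as enabling split-index testing; git_env_bool() parses the value as a boolean and supplies a default for the unset case. A toy version under that assumption (the real one goes through git_parse_maybe_bool() and accepts yes/no/on/off as well):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <strings.h>

    static int env_bool(const char *name, int def)
    {
        const char *v = getenv(name);

        if (!v || !*v)
            return def;
        if (!strcmp(v, "0") || !strcasecmp(v, "false"))
            return 0;
        return 1;
    }

    int main(void)
    {
        /* With plain getenv(), GIT_TEST_SPLIT_INDEX=false still switched
         * the test mode on; parsed as a boolean it switches it off. */
        printf("%d\n", env_bool("GIT_TEST_SPLIT_INDEX", 0));
        return 0;
    }
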
if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
- &current_head->tree->object.oid :
+ get_commit_tree_oid(current_head) :
- &empty_tree_oid, &tree)) {
+ the_hash_algo->empty_tree, &tree)) {
res = 1; /* run 'git commit' to display error message */
goto out;
}
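
the_hash_algo->empty_tree replaces the SHA-1-only empty_tree_oid/EMPTY_TREE_SHA1_* constants with a per-algorithm object ID. A stand-in comparison helper; under SHA-1 the empty tree is the well-known 4b825dc642cb6eb9a060e54bf8d69288fbee4904 (the SHA-256 value is omitted here, and the types are toys):

    #include <stdio.h>
    #include <string.h>

    struct object_id { unsigned char hash[32]; };

    static const struct object_id sha1_empty_tree = { {
        0x4b, 0x82, 0x5d, 0xc6, 0x42, 0xcb, 0x6e, 0xb9, 0xa0, 0x60,
        0xe5, 0x4b, 0xf8, 0xd6, 0x92, 0x88, 0xfb, 0xee, 0x49, 0x04,
    } };

    static int is_empty_tree_toy(const struct object_id *oid,
                                 const struct object_id *empty_tree,
                                 unsigned rawsz)
    {
        return !memcmp(oid->hash, empty_tree->hash, rawsz);
    }

    int main(void)
    {
        printf("%d\n", is_empty_tree_toy(&sha1_empty_tree,
                                         &sha1_empty_tree, 20)); /* 1 */
        return 0;
    }
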
return error(_("your index file is unmerged."));
} else {
unborn = get_oid("HEAD", &head);
- if (unborn)
+ /* Do we want to generate a root commit? */
+ if (is_pick_or_similar(command) && opts->have_squash_onto &&
+ !oidcmp(&head, &opts->squash_onto)) {
+ if (is_fixup(command))
+ return error(_("cannot fixup root commit"));
+ flags |= CREATE_ROOT_COMMIT;
+ unborn = 1;
+ } else if (unborn)
oidcpy(&head, the_hash_algo->empty_tree);
- if (index_differs_from(unborn ? EMPTY_TREE_SHA1_HEX : "HEAD",
+ if (index_differs_from(unborn ? empty_tree_oid_hex() : "HEAD",
NULL, 0))
return error_dirty_index(opts);
}
}
while (1) {
- if (find_pack_entry(r, real->hash, &e))
- if (find_pack_entry(the_repository, real, &e))
++ if (find_pack_entry(r, real, &e))
break;
if (flags & OBJECT_INFO_IGNORE_LOOSE)
/* Not a loose object; someone else may have just packed it. */
if (!(flags & OBJECT_INFO_QUICK)) {
- reprepare_packed_git(the_repository);
- if (find_pack_entry(the_repository, real, &e))
+ reprepare_packed_git(r);
- if (find_pack_entry(r, real->hash, &e))
++ if (find_pack_entry(r, real, &e))
break;
}
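
The merge resolution keeps both halves of the conversion: find_pack_entry() takes a struct object_id rather than a raw hash, and it runs against the function's repository argument r rather than the global the_repository, so the same lookup can serve e.g. a submodule's object store. The threading pattern with stand-in types:

    #include <stdio.h>

    struct repository { const char *gitdir; };

    static struct repository main_repo = { "/repo/.git" };

    static int find_pack_entry_toy(struct repository *r)
    {
        printf("searching packs under %s\n", r->gitdir);
        return 0; /* not found, in this stub */
    }

    static int lookup_object(struct repository *r)
    {
        return find_pack_entry_toy(r); /* use r, never the global */
    }

    int main(void)
    {
        struct repository submodule = { "/repo/sub/.git" };

        lookup_object(&main_repo);
        lookup_object(&submodule);
        return 0;
    }
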
o->result.timestamp.sec = o->src_index->timestamp.sec;
o->result.timestamp.nsec = o->src_index->timestamp.nsec;
o->result.version = o->src_index->version;
- o->result.split_index = o->src_index->split_index;
- if (o->result.split_index)
+ if (!o->src_index->split_index) {
+ o->result.split_index = NULL;
+ } else if (o->src_index == o->dst_index) {
+ /*
+ * o->dst_index (and thus o->src_index) will be discarded
+ * and overwritten with o->result at the end of this function,
+ * so just use src_index's split_index to avoid having to
+ * create a new one.
+ */
+ o->result.split_index = o->src_index->split_index;
o->result.split_index->refcount++;
- hashcpy(o->result.sha1, o->src_index->sha1);
+ } else {
+ o->result.split_index = init_split_index(&o->result);
+ }
+ oidcpy(&o->result.oid, &o->src_index->oid);
o->merge_size = len;
mark_all_ce_unused(o->src_index);
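
The rewritten branches distinguish three cases: no split index at all; src and dst are the same index, so the result can share (and refcount) the existing split index because the source is about to be discarded; otherwise a fresh one is allocated. A toy of the refcount handoff, with stand-in types and with error handling and freeing omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct split_index { int refcount; };
    struct index { struct split_index *split_index; };

    static void set_result_split_index(struct index *result,
                                       const struct index *src,
                                       const struct index *dst)
    {
        if (!src->split_index) {
            result->split_index = NULL;
        } else if (src == dst) {
            result->split_index = src->split_index;
            result->split_index->refcount++;
        } else {
            result->split_index = calloc(1, sizeof(*result->split_index));
        }
    }

    int main(void)
    {
        struct split_index si = { 1 };
        struct index src = { &si }, result = { NULL };

        set_result_split_index(&result, &src, &src); /* shared */
        printf("refcount: %d\n", si.refcount); /* 2 */
        return 0;
    }
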
parse_list_objects_filter(&filter_options, arg);
continue;
}
+
if (!skip_prefix(line, "want ", &arg) ||
- get_oid_hex(arg, &oid_buf))
+ parse_oid_hex(arg, &oid_buf, &features))
die("git upload-pack: protocol error, "
- "expected to get sha, not '%s'", line);
-
- features = arg + 40;
+ "expected to get object ID, not '%s'", line);
if (parse_feature_request(features, "deepen-relative"))
deepen_relative = 1;
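
parse_oid_hex() is what removes the last hard-coded offset here: besides converting the digits, it reports where they ended, so the feature list after the object ID is located without knowing whether hexsz is 40 or 64. A toy showing the end-pointer contract (real Git derives rawsz from the_hash_algo):

    #include <stdio.h>

    static int parse_oid_hex_toy(const char *s, unsigned char *oid,
                                 unsigned rawsz, const char **end)
    {
        unsigned i;

        for (i = 0; i < 2 * rawsz; i++) {
            char c = s[i];
            int v = (c >= '0' && c <= '9') ? c - '0' :
                    (c >= 'a' && c <= 'f') ? c - 'a' + 10 : -1;

            if (v < 0)
                return -1;
            if (i % 2)
                oid[i / 2] |= (unsigned char)v;
            else
                oid[i / 2] = (unsigned char)(v << 4);
        }
        *end = s + 2 * rawsz;
        return 0;
    }

    int main(void)
    {
        unsigned char oid[32];
        const char *features;
        const char *arg =
            "4b825dc642cb6eb9a060e54bf8d69288fbee4904 side-band-64k";

        if (!parse_oid_hex_toy(arg, oid, 20, &features))
            printf("features:%s\n", features); /* " side-band-64k" */
        return 0;
    }
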