the_hash_algo->init_fn(&tmp_ctx);
the_hash_algo->clone_fn(&tmp_ctx, &input_ctx);
the_hash_algo->final_fn(hash, &tmp_ctx);
- if (!hasheq(fill(the_hash_algo->rawsz), hash))
+ if (!hasheq(fill(the_hash_algo->rawsz), hash, the_repository->hash_algo))
die(_("pack is corrupted (SHA1 mismatch)"));
use(the_hash_algo->rawsz);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0);
- hashcpy(read_hash, pack_hash);
+ hashcpy(read_hash, pack_hash, the_repository->hash_algo);
fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
read_hash, consumed_bytes-the_hash_algo->rawsz);
- if (!hasheq(read_hash, tail_hash))
+ if (!hasheq(read_hash, tail_hash, the_repository->hash_algo))
die(_("Unexpected tail checksum for %s "
"(disk corruption?)"), curr_pack);
}
l = (hint == NULL) ? list->front : hint;
prev = NULL;
while (l) {
- const int cmp = hashcmp(l->oid.hash, oid);
+ const int cmp = hashcmp(l->oid.hash, oid, the_repository->hash_algo);
if (cmp > 0) /* not in list, since sorted */
return prev;
if (!cmp) { /* found */
while (p1_off < p1->pack->num_objects * p1_step &&
p2_off < p2->pack->num_objects * p2_step)
{
- const int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off);
+ const int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off,
+ the_repository->hash_algo);
/* cmp ~ p1 - p2 */
if (cmp == 0) {
p1_hint = llist_sorted_remove(p1->unique_objects,
while (p1_off < p1->num_objects * p1_step &&
p2_off < p2->num_objects * p2_step)
{
- int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off);
+ int cmp = hashcmp(p1_base + p1_off, p2_base + p2_off,
+ the_repository->hash_algo);
/* cmp ~ p1 - p2 */
if (cmp == 0) {
ret++;
if (fsck_finish(&fsck_options))
die(_("fsck error in pack objects"));
}
- if (!hasheq(fill(the_hash_algo->rawsz), oid.hash))
+ if (!hasheq(fill(the_hash_algo->rawsz), oid.hash,
+ the_repository->hash_algo))
die("final sha1 did not match");
use(the_hash_algo->rawsz);
if (!cur_g ||
!oideq(&oids[n], &cur_g->oid) ||
- !hasheq(oids[n].hash, g->chunk_base_graphs + st_mult(g->hash_len, n))) {
+ !hasheq(oids[n].hash, g->chunk_base_graphs + st_mult(g->hash_len, n),
+ the_repository->hash_algo)) {
warning(_("commit-graph chain does not match"));
return 0;
}
hashflush(f);
if (f->skip_hash)
- hashclr(f->buffer);
+ hashclr(f->buffer, the_repository->hash_algo);
else
the_hash_algo->final_fn(f->buffer, &f->ctx);
if (result)
- hashcpy(result, f->buffer);
+ hashcpy(result, f->buffer, the_repository->hash_algo);
if (flags & CSUM_HASH_IN_STREAM)
flush(f, f->buffer, the_hash_algo->rawsz);
if (flags & CSUM_FSYNC)
the_hash_algo->update_fn(&ctx, data, data_len);
the_hash_algo->final_fn(got, &ctx);
- return hasheq(got, data + data_len);
+ return hasheq(got, data + data_len, the_repository->hash_algo);
}
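
The pattern above — hashing a buffer and then comparing against the checksum that trails it — now needs the algorithm named at the comparison. A minimal standalone sketch of that pattern with an explicit algorithm (trailer_matches() is a hypothetical helper, not part of this change):

static int trailer_matches(const unsigned char *data, size_t data_len,
			   const struct git_hash_algo *algop)
{
	git_hash_ctx ctx;
	unsigned char got[GIT_MAX_RAWSZ];

	/* Hash everything up to the trailing checksum ... */
	algop->init_fn(&ctx);
	algop->update_fn(&ctx, data, data_len);
	algop->final_fn(got, &ctx);

	/* ... and compare against the checksum that follows the data. */
	return hasheq(got, data + data_len, algop);
}
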
const struct object_id *null_oid(void);
-static inline int hashcmp_algop(const unsigned char *sha1, const unsigned char *sha2, const struct git_hash_algo *algop)
+static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2, const struct git_hash_algo *algop)
{
/*
* Teach the compiler that there are only two possibilities of hash size
return memcmp(sha1, sha2, GIT_SHA1_RAWSZ);
}
-static inline int hasheq_algop(const unsigned char *sha1, const unsigned char *sha2, const struct git_hash_algo *algop)
+static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2, const struct git_hash_algo *algop)
{
/*
* We write this here instead of deferring to hashcmp so that the
return !memcmp(sha1, sha2, GIT_SHA1_RAWSZ);
}
+static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src,
+ const struct git_hash_algo *algop)
+{
+ memcpy(sha_dst, sha_src, algop->rawsz);
+}
+
+static inline void hashclr(unsigned char *hash, const struct git_hash_algo *algop)
+{
+ memset(hash, 0, algop->rawsz);
+}
+
static inline void oidcpy(struct object_id *dst, const struct object_id *src)
{
memcpy(dst->hash, src->hash, GIT_MAX_RAWSZ);
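
With the algorithm now an explicit argument, the raw-hash helpers can be exercised against any supported algorithm rather than only the_hash_algo. A hedged usage sketch (demo_raw_hash_helpers() is illustrative only):

static void demo_raw_hash_helpers(void)
{
	const struct git_hash_algo *algop = &hash_algos[GIT_HASH_SHA1];
	unsigned char a[GIT_MAX_RAWSZ], b[GIT_MAX_RAWSZ];

	hashclr(a, algop);	/* zero the algorithm's rawsz bytes */
	hashcpy(b, a, algop);	/* copy the algorithm's rawsz bytes */
	if (!hasheq(a, b, algop))
		BUG("copy of a cleared hash must compare equal");
}
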
while (lo < hi) {
unsigned mi = lo + (hi - lo) / 2;
- int cmp = hashcmp(table + mi * stride, hash);
+ int cmp = hashcmp(table + mi * stride, hash,
+ the_repository->hash_algo);
if (!cmp) {
if (result)
#define the_hash_algo the_repository->hash_algo
-static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2)
-{
- return hashcmp_algop(sha1, sha2, the_hash_algo);
-}
-
static inline int oidcmp(const struct object_id *oid1, const struct object_id *oid2)
{
const struct git_hash_algo *algop;
algop = the_hash_algo;
else
algop = &hash_algos[oid1->algo];
- return hashcmp_algop(oid1->hash, oid2->hash, algop);
-}
-
-static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2)
-{
- return hasheq_algop(sha1, sha2, the_hash_algo);
+ return hashcmp(oid1->hash, oid2->hash, algop);
}
static inline int oideq(const struct object_id *oid1, const struct object_id *oid2)
algop = the_hash_algo;
else
algop = &hash_algos[oid1->algo];
- return hasheq_algop(oid1->hash, oid2->hash, algop);
+ return hasheq(oid1->hash, oid2->hash, algop);
}
static inline int is_null_oid(const struct object_id *oid)
return oideq(oid, null_oid());
}
-static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
-{
- memcpy(sha_dst, sha_src, the_hash_algo->rawsz);
-}
-
/* Like oidcpy() but zero-pads the unused bytes in dst's hash array. */
static inline void oidcpy_with_padding(struct object_id *dst,
const struct object_id *src)
dst->algo = src->algo;
}
-static inline void hashclr(unsigned char *hash)
-{
- memset(hash, 0, the_hash_algo->rawsz);
-}
-
static inline void oidclr(struct object_id *oid)
{
memset(oid->hash, 0, GIT_MAX_RAWSZ);
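
For contrast, the object_id helpers keep their old signatures because the algorithm can be derived from the oid itself, while bare hash buffers need it passed explicitly. A small illustrative sketch (demo_oid_vs_raw() is hypothetical, not part of this change):

static void demo_oid_vs_raw(const struct object_id *src)
{
	struct object_id copy;
	unsigned char raw[GIT_MAX_RAWSZ];

	oidcpy(&copy, src);					/* no algorithm argument */
	hashcpy(raw, src->hash, the_repository->hash_algo);	/* algorithm is explicit */

	if (!oideq(&copy, src) ||
	    !hasheq(raw, src->hash, the_repository->hash_algo))
		BUG("copies must compare equal");
}
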
list_for_each(pos, head) {
obj_req = list_entry(pos, struct object_request, node);
- if (hasheq(obj_req->oid.hash, hash))
+ if (hasheq(obj_req->oid.hash, hash, the_repository->hash_algo))
break;
}
if (!obj_req)
} else {
rewrite_with = oid2;
}
- hashcpy(rewrite_here, rewrite_with->hash);
+ hashcpy(rewrite_here, rewrite_with->hash, the_repository->hash_algo);
status = write_object_file(buf, sz, OBJ_TREE, result);
free(buf);
return status;
void **p = note_tree_search(t, &tree, &n, key_sha1);
if (GET_PTR_TYPE(*p) == PTR_TYPE_NOTE) {
struct leaf_node *l = (struct leaf_node *) CLR_PTR_TYPE(*p);
- if (hasheq(key_sha1, l->key_oid.hash))
+ if (hasheq(key_sha1, l->key_oid.hash, the_repository->hash_algo))
return l;
}
return NULL;
void bitmap_writer_set_checksum(struct bitmap_writer *writer,
const unsigned char *sha1)
{
- hashcpy(writer->pack_checksum, sha1);
+ hashcpy(writer->pack_checksum, sha1, the_repository->hash_algo);
}
void bitmap_writer_finish(struct bitmap_writer *writer,
header.version = htons(default_version);
header.options = htons(flags | options);
header.entry_count = htonl(writer->selected_nr);
- hashcpy(header.checksum, writer->pack_checksum);
+ hashcpy(header.checksum, writer->pack_checksum, the_repository->hash_algo);
hashwrite(f, &header, sizeof(header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz);
dump_bitmap(f, writer->commits);
if (load_bitmap_header(bitmap_git) < 0)
goto cleanup;
- if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum)) {
+ if (!hasheq(get_midx_checksum(bitmap_git->midx), bitmap_git->checksum,
+ the_repository->hash_algo)) {
error(_("checksum doesn't match in MIDX and bitmap"));
goto cleanup;
}
} while (offset < pack_sig_ofs);
r->hash_algo->final_fn(hash, &ctx);
pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
- if (!hasheq(hash, pack_sig))
+ if (!hasheq(hash, pack_sig, the_repository->hash_algo))
err = error("%s pack checksum mismatch",
p->pack_name);
- if (!hasheq(index_base + index_size - r->hash_algo->hexsz, pack_sig))
+ if (!hasheq(index_base + index_size - r->hash_algo->hexsz, pack_sig,
+ the_repository->hash_algo))
err = error("%s pack checksum does not match its index",
p->pack_name);
unuse_pack(w_curs);
if (partial_pack_offset == 0) {
unsigned char hash[GIT_MAX_RAWSZ];
the_hash_algo->final_fn(hash, &old_hash_ctx);
- if (!hasheq(hash, partial_pack_hash))
+ if (!hasheq(hash, partial_pack_hash,
+ the_repository->hash_algo))
die("Unexpected checksum for %s "
"(disk corruption?)", pack_name);
/*
struct packed_git *p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, alloc); /* includes NUL */
- hashcpy(p->hash, sha1);
+ hashcpy(p->hash, sha1, the_repository->hash_algo);
if (check_packed_git_idx(idx_path, p)) {
free(p);
return NULL;
if (read_result != hashsz)
return error("packfile %s signature is unavailable", p->pack_name);
idx_hash = ((unsigned char *)p->index_data) + p->index_size - hashsz * 2;
- if (!hasheq(hash, idx_hash))
+ if (!hasheq(hash, idx_hash, the_repository->hash_algo))
return error("packfile %s does not match index", p->pack_name);
return 0;
}
p->mtime = st.st_mtime;
if (path_len < the_hash_algo->hexsz ||
get_hash_hex(path + path_len - the_hash_algo->hexsz, p->hash))
- hashclr(p->hash);
+ hashclr(p->hash, the_repository->hash_algo);
return p;
}
return 0;
}
- hashcpy(oid.hash, sha1);
+ hashcpy(oid.hash, sha1, the_repository->hash_algo);
if (bsearch_pack(&oid, p, &result))
return nth_packed_object_offset(p, result);
return 0;
the_hash_algo->init_fn(&c);
the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
the_hash_algo->final_fn(hash, &c);
- if (!hasheq(hash, start))
+ if (!hasheq(hash, start, the_repository->hash_algo))
return error(_("bad index file sha1 signature"));
return 0;
}
ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
ondisk->size = htonl(ce->ce_stat_data.sd_size);
- hashcpy(ondisk->data, ce->oid.hash);
+ hashcpy(ondisk->data, ce->oid.hash, the_repository->hash_algo);
flags = ce->ce_flags & ~CE_NAMEMASK;
flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
if (n != the_hash_algo->rawsz)
goto out;
- if (!hasheq(istate->oid.hash, hash))
+ if (!hasheq(istate->oid.hash, hash, the_repository->hash_algo))
goto out;
close(fd);
src_offset += extsize;
}
the_hash_algo->final_fn(hash, &c);
- if (!hasheq(hash, (const unsigned char *)index))
+ if (!hasheq(hash, (const unsigned char *)index, the_repository->hash_algo))
return 0;
/* Validate that the extension offsets returned us back to the eoie extension. */