From: Junio C Hamano
Date: Mon, 17 Sep 2018 20:53:57 +0000 (-0700)
Subject: Merge branch 'jk/cocci'
X-Git-Tag: v2.20.0-rc0~237
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=769af0fd9ea0b4de172b51ae4b9ea41b57c02fd3;p=thirdparty%2Fgit.git

Merge branch 'jk/cocci'

A spatch transformation to replace boolean uses of !hashcmp() with the
newly introduced oideq() is added, and applied, to regain performance
lost due to support of multiple hash algorithms.

* jk/cocci:
  show_dirstat: simplify same-content check
  read-cache: use oideq() in ce_compare functions
  convert hashmap comparison functions to oideq()
  convert "hashcmp() != 0" to "!hasheq()"
  convert "oidcmp() != 0" to "!oideq()"
  convert "hashcmp() == 0" to hasheq()
  convert "oidcmp() == 0" to oideq()
  introduce hasheq() and oideq()
  coccinelle: use <...> for function exclusion
---

769af0fd9ea0b4de172b51ae4b9ea41b57c02fd3
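
Note on the new helpers: hasheq() and oideq() are boolean equality checks
layered on top of the existing hashcmp()/oidcmp() comparisons, so call sites
that only care about "equal or not" no longer go through the three-way
ordering helper. A minimal, self-contained sketch of the idea follows; it is
not the exact cache.h/hash.h definitions (those size the comparison via
the_hash_algo->rawsz), and RAWSZ here is a stand-in constant.

/* Sketch only; real definitions live in cache.h/hash.h. */
#include <string.h>

#define RAWSZ 20	/* stand-in for the_hash_algo->rawsz */

struct object_id {
	unsigned char hash[RAWSZ];
};

static inline int hashcmp(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, RAWSZ);
}

/* Equality-only variants; callers write oideq(a, b) instead of !oidcmp(a, b). */
static inline int hasheq(const unsigned char *a, const unsigned char *b)
{
	return !hashcmp(a, b);
}

static inline int oideq(const struct object_id *a, const struct object_id *b)
{
	return hasheq(a->hash, b->hash);
}

The coccinelle rules added by this series (under contrib/coccinelle/)
mechanically rewrite call sites such as "oidcmp(a, b) == 0" into
"oideq(a, b)", which is where the oidcmp()/oideq() changes in the hunks
below come from.
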
diff --cc builtin/log.c
index f09a5789f8,98d668b56f..1dbb9d829b
--- a/builtin/log.c
+++ b/builtin/log.c
@@@ -1764,8 -1703,8 +1764,8 @@@ int cmd_format_patch(int argc, const ch
  		/* Don't say anything if head and upstream are the same. */
  		if (rev.pending.nr == 2) {
  			struct object_array_entry *o = rev.pending.objects;
- 			if (oidcmp(&o[0].item->oid, &o[1].item->oid) == 0)
+ 			if (oideq(&o[0].item->oid, &o[1].item->oid))
 -				return 0;
 +				goto done;
  		}
  		get_patch_ids(&rev, &ids);
  	}
diff --cc unpack-trees.c
index 55f864ac57,4056a92d55..65f157b9de
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@@ -679,116 -630,9 +679,116 @@@ static int switch_cache_bottom(struct t
  static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
  {
- 	return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
+ 	return name_j->oid && name_k->oid && oideq(name_j->oid, name_k->oid);
  }
  
 +static int all_trees_same_as_cache_tree(int n, unsigned long dirmask,
 +					struct name_entry *names,
 +					struct traverse_info *info)
 +{
 +	struct unpack_trees_options *o = info->data;
 +	int i;
 +
 +	if (!o->merge || dirmask != ((1 << n) - 1))
 +		return 0;
 +
 +	for (i = 1; i < n; i++)
 +		if (!are_same_oid(names, names + i))
 +			return 0;
 +
 +	return cache_tree_matches_traversal(o->src_index->cache_tree, names, info);
 +}
 +
 +static int index_pos_by_traverse_info(struct name_entry *names,
 +				      struct traverse_info *info)
 +{
 +	struct unpack_trees_options *o = info->data;
 +	int len = traverse_path_len(info, names);
 +	char *name = xmalloc(len + 1 /* slash */ + 1 /* NUL */);
 +	int pos;
 +
 +	make_traverse_path(name, info, names);
 +	name[len++] = '/';
 +	name[len] = '\0';
 +	pos = index_name_pos(o->src_index, name, len);
 +	if (pos >= 0)
 +		BUG("This is a directory and should not exist in index");
 +	pos = -pos - 1;
 +	if (!starts_with(o->src_index->cache[pos]->name, name) ||
 +	    (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name)))
 +		BUG("pos must point at the first entry in this directory");
 +	free(name);
 +	return pos;
 +}
 +
 +/*
 + * Fast path if we detect that all trees are the same as cache-tree at this
 + * path. We'll walk these trees in an iterative loop using cache-tree/index
 + * instead of ODB since we already know what these trees contain.
 + */
 +static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
 +				  struct name_entry *names,
 +				  struct traverse_info *info)
 +{
 +	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
 +	struct unpack_trees_options *o = info->data;
 +	struct cache_entry *tree_ce = NULL;
 +	int ce_len = 0;
 +	int i, d;
 +
 +	if (!o->merge)
 +		BUG("We need cache-tree to do this optimization");
 +
 +	/*
 +	 * Do what unpack_callback() and unpack_nondirectories() normally
 +	 * do. But we walk all paths in an iterative loop instead.
 +	 *
 +	 * D/F conflicts and higher stage entries are not a concern
 +	 * because cache-tree would be invalidated and we would never
 +	 * get here in the first place.
 +	 */
 +	for (i = 0; i < nr_entries; i++) {
 +		int new_ce_len, len, rc;
 +
 +		src[0] = o->src_index->cache[pos + i];
 +
 +		len = ce_namelen(src[0]);
 +		new_ce_len = cache_entry_size(len);
 +
 +		if (new_ce_len > ce_len) {
 +			new_ce_len <<= 1;
 +			tree_ce = xrealloc(tree_ce, new_ce_len);
 +			memset(tree_ce, 0, new_ce_len);
 +			ce_len = new_ce_len;
 +
 +			tree_ce->ce_flags = create_ce_flags(0);
 +
 +			for (d = 1; d <= nr_names; d++)
 +				src[d] = tree_ce;
 +		}
 +
 +		tree_ce->ce_mode = src[0]->ce_mode;
 +		tree_ce->ce_namelen = len;
 +		oidcpy(&tree_ce->oid, &src[0]->oid);
 +		memcpy(tree_ce->name, src[0]->name, len + 1);
 +
 +		rc = call_unpack_fn((const struct cache_entry * const *)src, o);
 +		if (rc < 0) {
 +			free(tree_ce);
 +			return rc;
 +		}
 +
 +		mark_ce_used(src[0], o);
 +	}
 +	free(tree_ce);
 +	if (o->debug_unpack)
 +		printf("Unpacked %d entries from %s to %s using cache-tree\n",
 +		       nr_entries,
 +		       o->src_index->cache[pos]->name,
 +		       o->src_index->cache[pos + nr_entries - 1]->name);
 +	return 0;
 +}
 +
  static int traverse_trees_recursive(int n, unsigned long dirmask,
  				    unsigned long df_conflicts,
  				    struct name_entry *names,
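
The hunk above ends inside the signature of traverse_trees_recursive(), the
caller of the new fast path. As a rough, hypothetical sketch of the control
flow only: the helper names are the ones added in the hunk, while
maybe_traverse_by_cache_tree() is a made-up wrapper and the merged body of
traverse_trees_recursive() is not shown in this diff.

/*
 * Hypothetical illustration, not the merged code: if every tree being
 * traversed matches the cache-tree at this path, walk the index entries
 * iteratively instead of reading the trees from the object database.
 */
static int maybe_traverse_by_cache_tree(int n, unsigned long dirmask,
					struct name_entry *names,
					struct traverse_info *info)
{
	int nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);

	if (nr_entries > 0) {
		int pos = index_pos_by_traverse_info(names, info);
		return traverse_by_cache_tree(pos, nr_entries, n, names, info);
	}
	return -1;	/* caller falls back to the regular recursive walk */
}
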