]>
git.ipfire.org Git - thirdparty/git.git/blob - t/helper/test-lazy-init-name-hash.c
#define USE_THE_INDEX_VARIABLE
#include "test-tool.h"
#include "environment.h"
#include "name-hash.h"
#include "parse-options.h"
#include "read-cache-ll.h"
#include "repository.h"
#include "setup.h"
#include "trace.h"
/*
 * Command-line controls, filled in by parse_options() in
 * cmd__lazy_init_name_hash().  (The scrape of this file dropped the
 * siblings of analyze_step; they are restored here because the visible
 * option table takes their addresses and every mode tests them.)
 */
static int single;       /* -s: run the single-threaded code path */
static int multi;        /* -m: run the multi-threaded code path */
static int count = 1;    /* -c: number of timing passes */
static int dump;         /* -d: dump the dir/name hash tables */
static int perf;         /* -p: compare single vs multi timings */
static int analyze;      /* -a: starting cache_nr for analyze mode */
static int analyze_step; /* --step: cache_nr increment for analyze mode */
16 * Dump the contents of the "dir" and "name" hash tables to stdout.
17 * If you sort the result, you can compare it with the other type
18 * mode and verify that both single and multi produce the same set.
20 static void dump_run(void)
22 struct hashmap_iter iter_dir
;
23 struct hashmap_iter iter_cache
;
25 /* Stolen from name-hash.c */
27 struct hashmap_entry ent
;
28 struct dir_entry
*parent
;
31 char name
[FLEX_ARRAY
];
34 struct dir_entry
*dir
;
35 struct cache_entry
*ce
;
37 repo_read_index(the_repository
);
39 test_lazy_init_name_hash(&the_index
, 0);
41 int nr_threads_used
= test_lazy_init_name_hash(&the_index
, 1);
43 die("non-threaded code path used");
46 hashmap_for_each_entry(&the_index
.dir_hash
, &iter_dir
, dir
,
47 ent
/* member name */)
48 printf("dir %08x %7d %s\n", dir
->ent
.hash
, dir
->nr
, dir
->name
);
50 hashmap_for_each_entry(&the_index
.name_hash
, &iter_cache
, ce
,
51 ent
/* member name */)
52 printf("name %08x %s\n", ce
->ent
.hash
, ce
->name
);
54 discard_index(&the_index
);
58 * Run the single or multi threaded version "count" times and
59 * report on the time taken.
61 static uint64_t time_runs(int try_threaded
)
69 for (i
= 0; i
< count
; i
++) {
71 repo_read_index(the_repository
);
73 nr_threads_used
= test_lazy_init_name_hash(&the_index
, try_threaded
);
78 if (try_threaded
&& !nr_threads_used
)
79 die("non-threaded code path used");
82 printf("%f %f %d multi %d\n",
83 ((double)(t1
- t0
))/1000000000,
84 ((double)(t2
- t1
))/1000000000,
88 printf("%f %f %d single\n",
89 ((double)(t1
- t0
))/1000000000,
90 ((double)(t2
- t1
))/1000000000,
94 discard_index(&the_index
);
100 (double)avg
/1000000000,
101 (try_threaded
) ? "multi" : "single");
107 * Try a series of runs varying the "istate->cache_nr" and
108 * try to find a good value for the multi-threaded criteria.
110 static void analyze_run(void)
112 uint64_t t1s
, t1m
, t2s
, t2m
;
114 int nr_threads_used
= 0;
118 repo_read_index(the_repository
);
119 cache_nr_limit
= the_index
.cache_nr
;
120 discard_index(&the_index
);
124 uint64_t sum_single
= 0;
125 uint64_t sum_multi
= 0;
129 if (nr
> cache_nr_limit
)
132 for (i
= 0; i
< count
; i
++) {
133 repo_read_index(the_repository
);
134 the_index
.cache_nr
= nr
; /* cheap truncate of index */
136 test_lazy_init_name_hash(&the_index
, 0);
138 sum_single
+= (t2s
- t1s
);
139 the_index
.cache_nr
= cache_nr_limit
;
140 discard_index(&the_index
);
142 repo_read_index(the_repository
);
143 the_index
.cache_nr
= nr
; /* cheap truncate of index */
145 nr_threads_used
= test_lazy_init_name_hash(&the_index
, 1);
147 sum_multi
+= (t2m
- t1m
);
148 the_index
.cache_nr
= cache_nr_limit
;
149 discard_index(&the_index
);
151 if (!nr_threads_used
)
152 printf(" [size %8d] [single %f] non-threaded code path used\n",
153 nr
, ((double)(t2s
- t1s
))/1000000000);
155 printf(" [size %8d] [single %f] %c [multi %f %d]\n",
157 ((double)(t2s
- t1s
))/1000000000,
158 (((t2s
- t1s
) < (t2m
- t1m
)) ? '<' : '>'),
159 ((double)(t2m
- t1m
))/1000000000,
164 avg_single
= sum_single
/ count
;
165 avg_multi
= sum_multi
/ count
;
166 if (!nr_threads_used
)
167 printf("avg [size %8d] [single %f]\n",
169 (double)avg_single
/1000000000);
171 printf("avg [size %8d] [single %f] %c [multi %f %d]\n",
173 (double)avg_single
/1000000000,
174 (avg_single
< avg_multi
? '<' : '>'),
175 (double)avg_multi
/1000000000,
180 if (nr
>= cache_nr_limit
)
186 int cmd__lazy_init_name_hash(int argc
, const char **argv
)
188 const char *usage
[] = {
189 "test-tool lazy-init-name-hash -d (-s | -m)",
190 "test-tool lazy-init-name-hash -p [-c c]",
191 "test-tool lazy-init-name-hash -a a [--step s] [-c c]",
192 "test-tool lazy-init-name-hash (-s | -m) [-c c]",
193 "test-tool lazy-init-name-hash -s -m [-c c]",
196 struct option options
[] = {
197 OPT_BOOL('s', "single", &single
, "run single-threaded code"),
198 OPT_BOOL('m', "multi", &multi
, "run multi-threaded code"),
199 OPT_INTEGER('c', "count", &count
, "number of passes"),
200 OPT_BOOL('d', "dump", &dump
, "dump hash tables"),
201 OPT_BOOL('p', "perf", &perf
, "compare single vs multi"),
202 OPT_INTEGER('a', "analyze", &analyze
, "analyze different multi sizes"),
203 OPT_INTEGER(0, "step", &analyze_step
, "analyze step factor"),
207 uint64_t avg_single
, avg_multi
;
209 prefix
= setup_git_directory();
211 argc
= parse_options(argc
, argv
, prefix
, options
, usage
, 0);
214 * istate->dir_hash is only created when ignore_case is set.
219 if (perf
|| analyze
> 0)
220 die("cannot combine dump, perf, or analyze");
222 die("count not valid with dump");
224 die("cannot use both single and multi with dump");
225 if (!single
&& !multi
)
226 die("dump requires either single or multi");
233 die("cannot combine dump, perf, or analyze");
235 die("cannot use single or multi with perf");
236 avg_single
= time_runs(0);
237 avg_multi
= time_runs(1);
238 if (avg_multi
> avg_single
)
239 die("multi is slower");
245 die("analyze must be at least 500");
247 analyze_step
= analyze
;
249 die("cannot use single or multi with analyze");
254 if (!single
&& !multi
)
255 die("require either -s or -m or both");