/*
 * test-lazy-init-name-hash.c: test-tool helper that exercises the
 * single- and multi-threaded lazy name-hash initialization code.
 */
1 #define USE_THE_INDEX_VARIABLE
2 #include "test-tool.h"
3 #include "environment.h"
4 #include "name-hash.h"
5 #include "parse-options.h"
6 #include "read-cache-ll.h"
7 #include "repository.h"
8 #include "setup.h"
9 #include "trace.h"
10
11 static int single;
12 static int multi;
13 static int count = 1;
14 static int dump;
15 static int perf;
16 static int analyze;
17 static int analyze_step;
18
19 /*
20 * Dump the contents of the "dir" and "name" hash tables to stdout.
21 * If you sort the result, you can compare it with the other type
22 * mode and verify that both single and multi produce the same set.
23 */
24 static void dump_run(void)
25 {
26 struct hashmap_iter iter_dir;
27 struct hashmap_iter iter_cache;
28
29 /* Stolen from name-hash.c */
30 struct dir_entry {
31 struct hashmap_entry ent;
32 struct dir_entry *parent;
33 int nr;
34 unsigned int namelen;
35 char name[FLEX_ARRAY];
36 };
37
38 struct dir_entry *dir;
39 struct cache_entry *ce;
40
41 repo_read_index(the_repository);
42 if (single) {
43 test_lazy_init_name_hash(&the_index, 0);
44 } else {
45 int nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
46 if (!nr_threads_used)
47 die("non-threaded code path used");
48 }
49
50 hashmap_for_each_entry(&the_index.dir_hash, &iter_dir, dir,
51 ent /* member name */)
52 printf("dir %08x %7d %s\n", dir->ent.hash, dir->nr, dir->name);
53
54 hashmap_for_each_entry(&the_index.name_hash, &iter_cache, ce,
55 ent /* member name */)
56 printf("name %08x %s\n", ce->ent.hash, ce->name);
57
58 discard_index(&the_index);
59 }
60
61 /*
62 * Run the single or multi threaded version "count" times and
63 * report on the time taken.
64 */
65 static uint64_t time_runs(int try_threaded)
66 {
67 uint64_t t0, t1, t2;
68 uint64_t sum = 0;
69 uint64_t avg;
70 int nr_threads_used;
71 int i;
72
73 for (i = 0; i < count; i++) {
74 t0 = getnanotime();
75 repo_read_index(the_repository);
76 t1 = getnanotime();
77 nr_threads_used = test_lazy_init_name_hash(&the_index, try_threaded);
78 t2 = getnanotime();
79
80 sum += (t2 - t1);
81
82 if (try_threaded && !nr_threads_used)
83 die("non-threaded code path used");
84
85 if (nr_threads_used)
86 printf("%f %f %d multi %d\n",
87 ((double)(t1 - t0))/1000000000,
88 ((double)(t2 - t1))/1000000000,
89 the_index.cache_nr,
90 nr_threads_used);
91 else
92 printf("%f %f %d single\n",
93 ((double)(t1 - t0))/1000000000,
94 ((double)(t2 - t1))/1000000000,
95 the_index.cache_nr);
96 fflush(stdout);
97
98 discard_index(&the_index);
99 }
100
101 avg = sum / count;
102 if (count > 1)
103 printf("avg %f %s\n",
104 (double)avg/1000000000,
105 (try_threaded) ? "multi" : "single");
106
107 return avg;
108 }
109
110 /*
111 * Try a series of runs varying the "istate->cache_nr" and
112 * try to find a good value for the multi-threaded criteria.
113 */
114 static void analyze_run(void)
115 {
116 uint64_t t1s, t1m, t2s, t2m;
117 int cache_nr_limit;
118 int nr_threads_used = 0;
119 int i;
120 int nr;
121
122 repo_read_index(the_repository);
123 cache_nr_limit = the_index.cache_nr;
124 discard_index(&the_index);
125
126 nr = analyze;
127 while (1) {
128 uint64_t sum_single = 0;
129 uint64_t sum_multi = 0;
130 uint64_t avg_single;
131 uint64_t avg_multi;
132
133 if (nr > cache_nr_limit)
134 nr = cache_nr_limit;
135
136 for (i = 0; i < count; i++) {
137 repo_read_index(the_repository);
138 the_index.cache_nr = nr; /* cheap truncate of index */
139 t1s = getnanotime();
140 test_lazy_init_name_hash(&the_index, 0);
141 t2s = getnanotime();
142 sum_single += (t2s - t1s);
143 the_index.cache_nr = cache_nr_limit;
144 discard_index(&the_index);
145
146 repo_read_index(the_repository);
147 the_index.cache_nr = nr; /* cheap truncate of index */
148 t1m = getnanotime();
149 nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
150 t2m = getnanotime();
151 sum_multi += (t2m - t1m);
152 the_index.cache_nr = cache_nr_limit;
153 discard_index(&the_index);
154
155 if (!nr_threads_used)
156 printf(" [size %8d] [single %f] non-threaded code path used\n",
157 nr, ((double)(t2s - t1s))/1000000000);
158 else
159 printf(" [size %8d] [single %f] %c [multi %f %d]\n",
160 nr,
161 ((double)(t2s - t1s))/1000000000,
162 (((t2s - t1s) < (t2m - t1m)) ? '<' : '>'),
163 ((double)(t2m - t1m))/1000000000,
164 nr_threads_used);
165 fflush(stdout);
166 }
167 if (count > 1) {
168 avg_single = sum_single / count;
169 avg_multi = sum_multi / count;
170 if (!nr_threads_used)
171 printf("avg [size %8d] [single %f]\n",
172 nr,
173 (double)avg_single/1000000000);
174 else
175 printf("avg [size %8d] [single %f] %c [multi %f %d]\n",
176 nr,
177 (double)avg_single/1000000000,
178 (avg_single < avg_multi ? '<' : '>'),
179 (double)avg_multi/1000000000,
180 nr_threads_used);
181 fflush(stdout);
182 }
183
184 if (nr >= cache_nr_limit)
185 return;
186 nr += analyze_step;
187 }
188 }
189
190 int cmd__lazy_init_name_hash(int argc, const char **argv)
191 {
192 const char *usage[] = {
193 "test-tool lazy-init-name-hash -d (-s | -m)",
194 "test-tool lazy-init-name-hash -p [-c c]",
195 "test-tool lazy-init-name-hash -a a [--step s] [-c c]",
196 "test-tool lazy-init-name-hash (-s | -m) [-c c]",
197 "test-tool lazy-init-name-hash -s -m [-c c]",
198 NULL
199 };
200 struct option options[] = {
201 OPT_BOOL('s', "single", &single, "run single-threaded code"),
202 OPT_BOOL('m', "multi", &multi, "run multi-threaded code"),
203 OPT_INTEGER('c', "count", &count, "number of passes"),
204 OPT_BOOL('d', "dump", &dump, "dump hash tables"),
205 OPT_BOOL('p', "perf", &perf, "compare single vs multi"),
206 OPT_INTEGER('a', "analyze", &analyze, "analyze different multi sizes"),
207 OPT_INTEGER(0, "step", &analyze_step, "analyze step factor"),
208 OPT_END(),
209 };
210 const char *prefix;
211 uint64_t avg_single, avg_multi;
212
213 prefix = setup_git_directory();
214
215 argc = parse_options(argc, argv, prefix, options, usage, 0);
216
217 /*
218 * istate->dir_hash is only created when ignore_case is set.
219 */
220 ignore_case = 1;
221
222 if (dump) {
223 if (perf || analyze > 0)
224 die("cannot combine dump, perf, or analyze");
225 if (count > 1)
226 die("count not valid with dump");
227 if (single && multi)
228 die("cannot use both single and multi with dump");
229 if (!single && !multi)
230 die("dump requires either single or multi");
231 dump_run();
232 return 0;
233 }
234
235 if (perf) {
236 if (analyze > 0)
237 die("cannot combine dump, perf, or analyze");
238 if (single || multi)
239 die("cannot use single or multi with perf");
240 avg_single = time_runs(0);
241 avg_multi = time_runs(1);
242 if (avg_multi > avg_single)
243 die("multi is slower");
244 return 0;
245 }
246
247 if (analyze) {
248 if (analyze < 500)
249 die("analyze must be at least 500");
250 if (!analyze_step)
251 analyze_step = analyze;
252 if (single || multi)
253 die("cannot use single or multi with analyze");
254 analyze_run();
255 return 0;
256 }
257
258 if (!single && !multi)
259 die("require either -s or -m or both");
260
261 if (single)
262 time_runs(0);
263 if (multi)
264 time_runs(1);
265
266 return 0;
267 }