/*
 * test-lazy-init-name-hash.c — test-tool helper that exercises
 * lazy_init_name_hash() in single- and multi-threaded modes
 * (dump, perf-compare, and threshold-analysis drivers).
 */
1 #define USE_THE_INDEX_VARIABLE
2 #include "test-tool.h"
3 #include "cache.h"
4 #include "environment.h"
5 #include "parse-options.h"
6 #include "repository.h"
7 #include "setup.h"
8 #include "trace.h"
9
10 static int single;
11 static int multi;
12 static int count = 1;
13 static int dump;
14 static int perf;
15 static int analyze;
16 static int analyze_step;
17
18 /*
19 * Dump the contents of the "dir" and "name" hash tables to stdout.
20 * If you sort the result, you can compare it with the other type
21 * mode and verify that both single and multi produce the same set.
22 */
23 static void dump_run(void)
24 {
25 struct hashmap_iter iter_dir;
26 struct hashmap_iter iter_cache;
27
28 /* Stolen from name-hash.c */
29 struct dir_entry {
30 struct hashmap_entry ent;
31 struct dir_entry *parent;
32 int nr;
33 unsigned int namelen;
34 char name[FLEX_ARRAY];
35 };
36
37 struct dir_entry *dir;
38 struct cache_entry *ce;
39
40 repo_read_index(the_repository);
41 if (single) {
42 test_lazy_init_name_hash(&the_index, 0);
43 } else {
44 int nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
45 if (!nr_threads_used)
46 die("non-threaded code path used");
47 }
48
49 hashmap_for_each_entry(&the_index.dir_hash, &iter_dir, dir,
50 ent /* member name */)
51 printf("dir %08x %7d %s\n", dir->ent.hash, dir->nr, dir->name);
52
53 hashmap_for_each_entry(&the_index.name_hash, &iter_cache, ce,
54 ent /* member name */)
55 printf("name %08x %s\n", ce->ent.hash, ce->name);
56
57 discard_index(&the_index);
58 }
59
60 /*
61 * Run the single or multi threaded version "count" times and
62 * report on the time taken.
63 */
64 static uint64_t time_runs(int try_threaded)
65 {
66 uint64_t t0, t1, t2;
67 uint64_t sum = 0;
68 uint64_t avg;
69 int nr_threads_used;
70 int i;
71
72 for (i = 0; i < count; i++) {
73 t0 = getnanotime();
74 repo_read_index(the_repository);
75 t1 = getnanotime();
76 nr_threads_used = test_lazy_init_name_hash(&the_index, try_threaded);
77 t2 = getnanotime();
78
79 sum += (t2 - t1);
80
81 if (try_threaded && !nr_threads_used)
82 die("non-threaded code path used");
83
84 if (nr_threads_used)
85 printf("%f %f %d multi %d\n",
86 ((double)(t1 - t0))/1000000000,
87 ((double)(t2 - t1))/1000000000,
88 the_index.cache_nr,
89 nr_threads_used);
90 else
91 printf("%f %f %d single\n",
92 ((double)(t1 - t0))/1000000000,
93 ((double)(t2 - t1))/1000000000,
94 the_index.cache_nr);
95 fflush(stdout);
96
97 discard_index(&the_index);
98 }
99
100 avg = sum / count;
101 if (count > 1)
102 printf("avg %f %s\n",
103 (double)avg/1000000000,
104 (try_threaded) ? "multi" : "single");
105
106 return avg;
107 }
108
109 /*
110 * Try a series of runs varying the "istate->cache_nr" and
111 * try to find a good value for the multi-threaded criteria.
112 */
/*
 * Try a series of runs varying the "istate->cache_nr" and
 * try to find a good value for the multi-threaded criteria.
 */
static void analyze_run(void)
{
	uint64_t t1s, t1m, t2s, t2m;
	int cache_nr_limit;
	int nr_threads_used = 0;
	int i;
	int nr;

	/* One throwaway read just to learn the real index size. */
	repo_read_index(the_repository);
	cache_nr_limit = the_index.cache_nr;
	discard_index(&the_index);

	/* Sweep sizes from "analyze" up to the full index, by analyze_step. */
	nr = analyze;
	while (1) {
		uint64_t sum_single = 0;
		uint64_t sum_multi = 0;
		uint64_t avg_single;
		uint64_t avg_multi;

		/* clamp the final iteration to the actual index size */
		if (nr > cache_nr_limit)
			nr = cache_nr_limit;

		for (i = 0; i < count; i++) {
			/*
			 * Time the single-threaded path on a freshly read
			 * index truncated (by count only, nothing is freed)
			 * to "nr" entries.
			 */
			repo_read_index(the_repository);
			the_index.cache_nr = nr; /* cheap truncate of index */
			t1s = getnanotime();
			test_lazy_init_name_hash(&the_index, 0);
			t2s = getnanotime();
			sum_single += (t2s - t1s);
			/* restore the real count so discard frees everything */
			the_index.cache_nr = cache_nr_limit;
			discard_index(&the_index);

			/* Same again for the multi-threaded path. */
			repo_read_index(the_repository);
			the_index.cache_nr = nr; /* cheap truncate of index */
			t1m = getnanotime();
			nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
			t2m = getnanotime();
			sum_multi += (t2m - t1m);
			the_index.cache_nr = cache_nr_limit;
			discard_index(&the_index);

			/*
			 * '<' / '>' marks which mode won this pass; a zero
			 * nr_threads_used means the multi call fell back to
			 * the non-threaded code path for this size.
			 */
			if (!nr_threads_used)
				printf("    [size %8d] [single %f] non-threaded code path used\n",
					nr, ((double)(t2s - t1s))/1000000000);
			else
				printf("    [size %8d] [single %f] %c [multi %f %d]\n",
					nr,
					((double)(t2s - t1s))/1000000000,
					(((t2s - t1s) < (t2m - t1m)) ? '<' : '>'),
					((double)(t2m - t1m))/1000000000,
					nr_threads_used);
			fflush(stdout);
		}
		/* With multiple passes, also report per-size averages. */
		if (count > 1) {
			avg_single = sum_single / count;
			avg_multi = sum_multi / count;
			if (!nr_threads_used)
				printf("avg [size %8d] [single %f]\n",
					nr,
					(double)avg_single/1000000000);
			else
				printf("avg [size %8d] [single %f] %c [multi %f %d]\n",
					nr,
					(double)avg_single/1000000000,
					(avg_single < avg_multi ? '<' : '>'),
					(double)avg_multi/1000000000,
					nr_threads_used);
			fflush(stdout);
		}

		if (nr >= cache_nr_limit)
			return;
		nr += analyze_step;
	}
}
188
189 int cmd__lazy_init_name_hash(int argc, const char **argv)
190 {
191 const char *usage[] = {
192 "test-tool lazy-init-name-hash -d (-s | -m)",
193 "test-tool lazy-init-name-hash -p [-c c]",
194 "test-tool lazy-init-name-hash -a a [--step s] [-c c]",
195 "test-tool lazy-init-name-hash (-s | -m) [-c c]",
196 "test-tool lazy-init-name-hash -s -m [-c c]",
197 NULL
198 };
199 struct option options[] = {
200 OPT_BOOL('s', "single", &single, "run single-threaded code"),
201 OPT_BOOL('m', "multi", &multi, "run multi-threaded code"),
202 OPT_INTEGER('c', "count", &count, "number of passes"),
203 OPT_BOOL('d', "dump", &dump, "dump hash tables"),
204 OPT_BOOL('p', "perf", &perf, "compare single vs multi"),
205 OPT_INTEGER('a', "analyze", &analyze, "analyze different multi sizes"),
206 OPT_INTEGER(0, "step", &analyze_step, "analyze step factor"),
207 OPT_END(),
208 };
209 const char *prefix;
210 uint64_t avg_single, avg_multi;
211
212 prefix = setup_git_directory();
213
214 argc = parse_options(argc, argv, prefix, options, usage, 0);
215
216 /*
217 * istate->dir_hash is only created when ignore_case is set.
218 */
219 ignore_case = 1;
220
221 if (dump) {
222 if (perf || analyze > 0)
223 die("cannot combine dump, perf, or analyze");
224 if (count > 1)
225 die("count not valid with dump");
226 if (single && multi)
227 die("cannot use both single and multi with dump");
228 if (!single && !multi)
229 die("dump requires either single or multi");
230 dump_run();
231 return 0;
232 }
233
234 if (perf) {
235 if (analyze > 0)
236 die("cannot combine dump, perf, or analyze");
237 if (single || multi)
238 die("cannot use single or multi with perf");
239 avg_single = time_runs(0);
240 avg_multi = time_runs(1);
241 if (avg_multi > avg_single)
242 die("multi is slower");
243 return 0;
244 }
245
246 if (analyze) {
247 if (analyze < 500)
248 die("analyze must be at least 500");
249 if (!analyze_step)
250 analyze_step = analyze;
251 if (single || multi)
252 die("cannot use single or multi with analyze");
253 analyze_run();
254 return 0;
255 }
256
257 if (!single && !multi)
258 die("require either -s or -m or both");
259
260 if (single)
261 time_runs(0);
262 if (multi)
263 time_runs(1);
264
265 return 0;
266 }