#include "test-tool.h"
#include "cache.h"
#include "parse-options.h"

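/*
 * Values for the command-line options; filled in by parse_options()
 * in cmd__lazy_init_name_hash().
 */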
static int single;
static int multi;
static int count = 1;
static int dump;
static int perf;
static int analyze;
static int analyze_step;

/*
 * Dump the contents of the "dir" and "name" hash tables to stdout.
 * If you sort the result, you can compare it with the output of the
 * other mode and verify that both single and multi produce the same set.
 */
static void dump_run(void)
{
	struct hashmap_iter iter_dir;
	struct hashmap_iter iter_cache;

	/* Stolen from name-hash.c */
	struct dir_entry {
		struct hashmap_entry ent;
		struct dir_entry *parent;
		int nr;
		unsigned int namelen;
		char name[FLEX_ARRAY];
	};

	struct dir_entry *dir;
	struct cache_entry *ce;

	read_cache();
	if (single) {
		test_lazy_init_name_hash(&the_index, 0);
	} else {
		int nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
		if (!nr_threads_used)
			die("non-threaded code path used");
	}

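	/*
	 * One line per entry: "dir <hash> <nr> <name>" from the dir hash
	 * and "name <hash> <path>" from the name hash.
	 */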
	hashmap_for_each_entry(&the_index.dir_hash, &iter_dir, dir,
				ent /* member name */)
		printf("dir %08x %7d %s\n", dir->ent.hash, dir->nr, dir->name);

	hashmap_for_each_entry(&the_index.name_hash, &iter_cache, ce,
				ent /* member name */)
		printf("name %08x %s\n", ce->ent.hash, ce->name);

	discard_cache();
}

/*
 * Run the single- or multi-threaded version "count" times and
 * report on the time taken.
 */
static uint64_t time_runs(int try_threaded)
{
	uint64_t t0, t1, t2;
	uint64_t sum = 0;
	uint64_t avg;
	int nr_threads_used;
	int i;

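	/*
	 * Each pass reads the index, times test_lazy_init_name_hash(),
	 * prints the per-pass timings, and then discards the cache so the
	 * next pass starts from scratch.
	 */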
	for (i = 0; i < count; i++) {
		t0 = getnanotime();
		read_cache();
		t1 = getnanotime();
		nr_threads_used = test_lazy_init_name_hash(&the_index, try_threaded);
		t2 = getnanotime();

		sum += (t2 - t1);

		if (try_threaded && !nr_threads_used)
			die("non-threaded code path used");

		if (nr_threads_used)
			printf("%f %f %d multi %d\n",
			       ((double)(t1 - t0))/1000000000,
			       ((double)(t2 - t1))/1000000000,
			       the_index.cache_nr,
			       nr_threads_used);
		else
			printf("%f %f %d single\n",
			       ((double)(t1 - t0))/1000000000,
			       ((double)(t2 - t1))/1000000000,
			       the_index.cache_nr);
		fflush(stdout);

		discard_cache();
	}

	avg = sum / count;
	if (count > 1)
		printf("avg %f %s\n",
		       (double)avg/1000000000,
		       (try_threaded) ? "multi" : "single");

	return avg;
}

/*
 * Try a series of runs, varying "istate->cache_nr", to find a good
 * threshold for enabling the multi-threaded code.
 */
static void analyze_run(void)
{
	uint64_t t1s, t1m, t2s, t2m;
	int cache_nr_limit;
	int nr_threads_used = 0;
	int i;
	int nr;

	read_cache();
	cache_nr_limit = the_index.cache_nr;
	discard_cache();

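	/*
	 * Start at the requested size and grow by "analyze_step" each
	 * round, capping at the full index size.
	 */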
	nr = analyze;
	while (1) {
		uint64_t sum_single = 0;
		uint64_t sum_multi = 0;
		uint64_t avg_single;
		uint64_t avg_multi;

		if (nr > cache_nr_limit)
			nr = cache_nr_limit;

		for (i = 0; i < count; i++) {
			read_cache();
			the_index.cache_nr = nr; /* cheap truncate of index */
			t1s = getnanotime();
			test_lazy_init_name_hash(&the_index, 0);
			t2s = getnanotime();
			sum_single += (t2s - t1s);
			the_index.cache_nr = cache_nr_limit;
			discard_cache();

			read_cache();
			the_index.cache_nr = nr; /* cheap truncate of index */
			t1m = getnanotime();
			nr_threads_used = test_lazy_init_name_hash(&the_index, 1);
			t2m = getnanotime();
			sum_multi += (t2m - t1m);
			the_index.cache_nr = cache_nr_limit;
			discard_cache();

			if (!nr_threads_used)
				printf("    [size %8d] [single %f] non-threaded code path used\n",
				       nr, ((double)(t2s - t1s))/1000000000);
			else
				printf("    [size %8d] [single %f] %c [multi %f %d]\n",
				       nr,
				       ((double)(t2s - t1s))/1000000000,
				       (((t2s - t1s) < (t2m - t1m)) ? '<' : '>'),
				       ((double)(t2m - t1m))/1000000000,
				       nr_threads_used);
			fflush(stdout);
		}
		if (count > 1) {
			avg_single = sum_single / count;
			avg_multi = sum_multi / count;
			if (!nr_threads_used)
				printf("avg [size %8d] [single %f]\n",
				       nr,
				       (double)avg_single/1000000000);
			else
				printf("avg [size %8d] [single %f] %c [multi %f %d]\n",
				       nr,
				       (double)avg_single/1000000000,
				       (avg_single < avg_multi ? '<' : '>'),
				       (double)avg_multi/1000000000,
				       nr_threads_used);
			fflush(stdout);
		}

		if (nr >= cache_nr_limit)
			return;
		nr += analyze_step;
	}
}

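/*
 * Dispatch on the command-line options: -d dumps the hash tables,
 * -p compares single- vs multi-threaded timings, -a/--analyze sweeps
 * index sizes, and plain -s/-m just time the chosen code path(s).
 */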
int cmd__lazy_init_name_hash(int argc, const char **argv)
{
	const char *usage[] = {
		"test-tool lazy-init-name-hash -d (-s | -m)",
		"test-tool lazy-init-name-hash -p [-c c]",
		"test-tool lazy-init-name-hash -a a [--step s] [-c c]",
		"test-tool lazy-init-name-hash (-s | -m) [-c c]",
		"test-tool lazy-init-name-hash -s -m [-c c]",
		NULL
	};
	struct option options[] = {
		OPT_BOOL('s', "single", &single, "run single-threaded code"),
		OPT_BOOL('m', "multi", &multi, "run multi-threaded code"),
		OPT_INTEGER('c', "count", &count, "number of passes"),
		OPT_BOOL('d', "dump", &dump, "dump hash tables"),
		OPT_BOOL('p', "perf", &perf, "compare single vs multi"),
		OPT_INTEGER('a', "analyze", &analyze, "analyze different multi sizes"),
		OPT_INTEGER(0, "step", &analyze_step, "analyze step factor"),
		OPT_END(),
	};
	const char *prefix;
	uint64_t avg_single, avg_multi;

	prefix = setup_git_directory();

	argc = parse_options(argc, argv, prefix, options, usage, 0);

	/*
	 * istate->dir_hash is only created when ignore_case is set.
	 */
	ignore_case = 1;

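	/*
	 * The dump, perf, and analyze modes are mutually exclusive;
	 * without one of them we fall through to plain timing runs.
	 */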
	if (dump) {
		if (perf || analyze > 0)
			die("cannot combine dump, perf, or analyze");
		if (count > 1)
			die("count not valid with dump");
		if (single && multi)
			die("cannot use both single and multi with dump");
		if (!single && !multi)
			die("dump requires either single or multi");
		dump_run();
		return 0;
	}

	if (perf) {
		if (analyze > 0)
			die("cannot combine dump, perf, or analyze");
		if (single || multi)
			die("cannot use single or multi with perf");
		avg_single = time_runs(0);
		avg_multi = time_runs(1);
		if (avg_multi > avg_single)
			die("multi is slower");
		return 0;
	}

	if (analyze) {
		if (analyze < 500)
			die("analyze must be at least 500");
		if (!analyze_step)
			analyze_step = analyze;
		if (single || multi)
			die("cannot use single or multi with analyze");
		analyze_run();
		return 0;
	}

	if (!single && !multi)
		die("require either -s or -m or both");

	if (single)
		time_runs(0);
	if (multi)
		time_runs(1);

	return 0;
}