/*
 * name-hash.c
 *
 * Hashing names in the index state
 *
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "thread-utils.h"
#include "trace2.h"
#include "sparse-index.h"

struct dir_entry {
	struct hashmap_entry ent;
	struct dir_entry *parent;
	int nr;
	unsigned int namelen;
	char name[FLEX_ARRAY];
};

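/*
 * Compare callback for "istate->dir_hash".  Like every hashmap_cmp_fn,
 * it returns 0 for "the same" and non-zero for "different".  When a
 * lookup supplies "keydata", we compare against that raw string so that
 * callers can probe the table without building a throwaway dir_entry.
 */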
static int dir_entry_cmp(const void *unused_cmp_data,
			 const struct hashmap_entry *eptr,
			 const struct hashmap_entry *entry_or_key,
			 const void *keydata)
{
	const struct dir_entry *e1, *e2;
	const char *name = keydata;

	e1 = container_of(eptr, const struct dir_entry, ent);
	e2 = container_of(entry_or_key, const struct dir_entry, ent);

	return e1->namelen != e2->namelen || strncasecmp(e1->name,
			name ? name : e2->name, e1->namelen);
}

static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
		const char *name, unsigned int namelen, unsigned int hash)
{
	struct dir_entry key;
	hashmap_entry_init(&key.ent, hash);
	key.namelen = namelen;
	return hashmap_get_entry(&istate->dir_hash, &key, ent, name);
}

static struct dir_entry *find_dir_entry(struct index_state *istate,
		const char *name, unsigned int namelen)
{
	return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen));
}

static struct dir_entry *hash_dir_entry(struct index_state *istate,
		struct cache_entry *ce, int namelen)
{
	/*
	 * Throw each directory component in the hash for quick lookup
	 * during a git status.  Directory components are stored without
	 * their closing slash.  Despite being directories, submodules
	 * never reach this point, because they are stored in
	 * index_state.name_hash (as ordinary cache_entries).
	 */
	struct dir_entry *dir;

	/* get length of parent directory */
	while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1]))
		namelen--;
	if (namelen <= 0)
		return NULL;
	namelen--;

	/* lookup existing entry for that directory */
	dir = find_dir_entry(istate, ce->name, namelen);
	if (!dir) {
		/* not found, create it and add to hash table */
		FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
		hashmap_entry_init(&dir->ent, memihash(ce->name, namelen));
		dir->namelen = namelen;
		hashmap_add(&istate->dir_hash, &dir->ent);

		/* recursively add missing parent directories */
		dir->parent = hash_dir_entry(istate, ce, namelen);
	}
	return dir;
}

static void add_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/* Add reference to the directory entry (and parents if 0). */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(dir->nr++))
		dir = dir->parent;
}

static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/*
	 * Release reference to the directory entry.  If 0, remove and
	 * continue with parent directory.
	 */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(--dir->nr)) {
		struct dir_entry *parent = dir->parent;
		hashmap_remove(&istate->dir_hash, &dir->ent, NULL);
		free(dir);
		dir = parent;
	}
}

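/*
 * Add one cache_entry to "istate->name_hash" and, when ignoring case,
 * its leading directories to "istate->dir_hash".  Sparse-directory
 * entries are skipped: they stand in for whole unexpanded subtrees,
 * and lookups that might need them call expand_to_path() first (see
 * index_file_exists() below).
 */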
static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->ce_flags & CE_HASHED)
		return;
	ce->ce_flags |= CE_HASHED;

	if (!S_ISSPARSEDIR(ce->ce_mode)) {
		hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce)));
		hashmap_add(&istate->name_hash, &ce->ent);
	}

	if (ignore_case)
		add_dir_entry(istate, ce);
}

static int cache_entry_cmp(const void *unused_cmp_data,
			   const struct hashmap_entry *eptr,
			   const struct hashmap_entry *entry_or_key,
			   const void *remove)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = container_of(eptr, const struct cache_entry, ent);
	ce2 = container_of(entry_or_key, const struct cache_entry, ent);

	/*
	 * For remove_name_hash, find the exact entry (pointer equality); for
	 * index_file_exists, find all entries with matching hash code and
	 * decide whether the entry matches in same_name.
	 */
	return remove ? !(ce1 == ce2) : 0;
}

static int lazy_try_threaded = 1;
static int lazy_nr_dir_threads;

/*
 * Set a minimum number of cache_entries that we will handle per
 * thread and use that to decide how many threads to run (up to
 * the number on the system).
 *
 * For guidance setting the lower per-thread bound, see:
 *     t/helper/test-lazy-init-name-hash --analyze
 */
#define LAZY_THREAD_COST (2000)

/*
 * We use n mutexes to guard n partitions of the "istate->dir_hash"
 * hashtable.  Since "find" and "insert" operations will hash to a
 * particular bucket and modify/search a single chain, we can say
 * that "all chains mod n" are guarded by the same mutex -- rather
 * than having a single mutex to guard the entire table.  (This does
 * require that we disable "rehashing" on the hashtable.)
 *
 * So, a larger value here decreases the probability of a collision
 * and the time that each thread must wait for the mutex.
 */
#define LAZY_MAX_MUTEX (32)

static pthread_mutex_t *lazy_dir_mutex_array;

/*
 * An array of lazy_entry items is used by the n threads in
 * the directory parse (first) phase to (lock-free) store the
 * intermediate results.  These values are then referenced by
 * the 2 threads in the second phase.
 */
struct lazy_entry {
	struct dir_entry *dir;
	unsigned int hash_dir;
	unsigned int hash_name;
};

/*
 * Decide if we want to use threads (if available) to load
 * the hash tables.  We set "lazy_nr_dir_threads" to zero when
 * it is not worth it.
 */
static int lookup_lazy_params(struct index_state *istate)
{
	int nr_cpus;

	lazy_nr_dir_threads = 0;

	if (!lazy_try_threaded)
		return 0;

	/*
	 * If we are respecting case, just use the original
	 * code to build the "istate->name_hash".  We don't
	 * need the complexity here.
	 */
	if (!ignore_case)
		return 0;

	nr_cpus = online_cpus();
	if (nr_cpus < 2)
		return 0;

	if (istate->cache_nr < 2 * LAZY_THREAD_COST)
		return 0;

	if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST)
		nr_cpus = istate->cache_nr / LAZY_THREAD_COST;
	lazy_nr_dir_threads = nr_cpus;
	return lazy_nr_dir_threads;
}

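/*
 * For example, with LAZY_THREAD_COST at 2000: an index with fewer than
 * 4000 entries stays on the single-threaded path, while one with
 * 10,000 entries on an 8-core box gets 10000 / 2000 = 5 "dir" threads.
 */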
/*
 * Initialize n mutexes for use when searching and inserting
 * into "istate->dir_hash".  All "dir" threads are trying
 * to insert partial pathnames into the hash as they iterate
 * over their portions of the index, so lock contention is
 * high.
 *
 * However, the hashmap is going to put items into bucket
 * chains based on their hash values.  Use that to create n
 * mutexes and lock on mutex[bucket(hash) % n].  This will
 * decrease the collision rate by (hopefully) a factor of n.
 */
static void init_dir_mutex(void)
{
	int j;

	CALLOC_ARRAY(lazy_dir_mutex_array, LAZY_MAX_MUTEX);

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		init_recursive_mutex(&lazy_dir_mutex_array[j]);
}

static void cleanup_dir_mutex(void)
{
	int j;

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		pthread_mutex_destroy(&lazy_dir_mutex_array[j]);

	free(lazy_dir_mutex_array);
}

static void lock_dir_mutex(int j)
{
	pthread_mutex_lock(&lazy_dir_mutex_array[j]);
}

static void unlock_dir_mutex(int j)
{
	pthread_mutex_unlock(&lazy_dir_mutex_array[j]);
}

static inline int compute_dir_lock_nr(
	const struct hashmap *map,
	unsigned int hash)
{
	return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX;
}

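/*
 * Create (or find) the dir_entry for "prefix" while holding only the
 * mutex guarding that entry's bucket chain.  memihash_cont() extends
 * the parent's already-computed hash over just the trailing path
 * component, so each byte of the path is hashed only once no matter
 * how deep the directory nesting gets.
 */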
static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
	struct index_state *istate,
	struct dir_entry *parent,
	struct strbuf *prefix)
{
	struct dir_entry *dir;
	unsigned int hash;
	int lock_nr;

	/*
	 * Either we have a parent directory and path with slash(es)
	 * or the directory is an immediate child of the root directory.
	 */
	assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL));

	if (parent)
		hash = memihash_cont(parent->ent.hash,
			prefix->buf + parent->namelen,
			prefix->len - parent->namelen);
	else
		hash = memihash(prefix->buf, prefix->len);

	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
	lock_dir_mutex(lock_nr);

	dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
	if (!dir) {
		FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
		hashmap_entry_init(&dir->ent, hash);
		dir->namelen = prefix->len;
		dir->parent = parent;
		hashmap_add(&istate->dir_hash, &dir->ent);

		if (parent) {
			unlock_dir_mutex(lock_nr);

			/* All I really need here is an InterlockedIncrement(&(parent->nr)) */
			lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash);
			lock_dir_mutex(lock_nr);
			parent->nr++;
		}
	}

	unlock_dir_mutex(lock_nr);

	return dir;
}

/*
 * handle_range_1() and handle_range_dir() are derived from
 * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c
 * and handle the iteration over the entire array of index entries.
 * They use recursion for adjacent entries in the same parent
 * directory.
 */
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries);

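/*
 * handle_range_dir() processes the sub-range [k_start, k_end) of index
 * entries sharing the directory name now in "prefix".  It returns the
 * number of entries consumed and hands the new dir_entry back through
 * "dir_new_out" so the caller can descend into it.
 */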
static int handle_range_dir(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries,
	struct dir_entry **dir_new_out)
{
	int rc, k;
	int input_prefix_len = prefix->len;
	struct dir_entry *dir_new;

	dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix);

	strbuf_addch(prefix, '/');

	/*
	 * Scan forward in the index array for index entries having the same
	 * path prefix (that are also in this directory).
	 */
	if (k_start + 1 >= k_end)
		k = k_end;
	else if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0)
		k = k_start + 1;
	else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0)
		k = k_end;
	else {
		int begin = k_start;
		int end = k_end;
		assert(begin >= 0);
		while (begin < end) {
			int mid = begin + ((end - begin) >> 1);
			int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len);
			if (cmp == 0) /* mid has same prefix; look in second part */
				begin = mid + 1;
			else if (cmp > 0) /* mid is past group; look in first part */
				end = mid;
			else
				die("cache entry out of order");
		}
		k = begin;
	}

	/*
	 * Recurse and process what we can of this subset [k_start, k).
	 */
	rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries);

	strbuf_setlen(prefix, input_prefix_len);

	*dir_new_out = dir_new;
	return rc;
}

static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries)
{
	int input_prefix_len = prefix->len;
	int k = k_start;

	while (k < k_end) {
		struct cache_entry *ce_k = istate->cache[k];
		const char *name, *slash;

		if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len))
			break;

		name = ce_k->name + prefix->len;
		slash = strchr(name, '/');

		if (slash) {
			int len = slash - name;
			int processed;
			struct dir_entry *dir_new;

			strbuf_add(prefix, name, len);
			processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new);
			if (processed) {
				k += processed;
				strbuf_setlen(prefix, input_prefix_len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries);
			k += processed;
			strbuf_setlen(prefix, input_prefix_len);
			continue;
		}

		/*
		 * It is too expensive to take a lock to insert "ce_k"
		 * into "istate->name_hash" and increment the ref-count
		 * on the "parent" dir.  So we defer actually updating
		 * permanent data structures until phase 2 (where we
		 * can change the locking requirements) and simply
		 * accumulate our current results into the lazy_entries
		 * data array.
		 *
		 * We do not need to lock the lazy_entries array because
		 * we have exclusive access to the cells in the range
		 * [k_start,k_end) that this thread was given.
		 */
		lazy_entries[k].dir = parent;
		if (parent) {
			lazy_entries[k].hash_name = memihash_cont(
					parent->ent.hash,
					ce_k->name + parent->namelen,
					ce_namelen(ce_k) - parent->namelen);
			lazy_entries[k].hash_dir = parent->ent.hash;
		} else {
			lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k));
		}

		k++;
	}

	return k - k_start;
}

struct lazy_dir_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
	int k_start;
	int k_end;
};

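/*
 * Phase 1 worker: walk this thread's slice [k_start, k_end) of the
 * index, populating "istate->dir_hash" (under the per-bucket mutexes)
 * and recording per-entry hashes in "lazy_entries" for phase 2.
 */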
static void *lazy_dir_thread_proc(void *_data)
{
	struct lazy_dir_thread_data *d = _data;
	struct strbuf prefix = STRBUF_INIT;
	handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries);
	strbuf_release(&prefix);
	return NULL;
}

struct lazy_name_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
};

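/*
 * Phase 2 worker: with every hash precomputed in "lazy_entries", fill
 * "istate->name_hash" without any locking; this is the only thread
 * writing to that table.
 */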
static void *lazy_name_thread_proc(void *_data)
{
	struct lazy_name_thread_data *d = _data;
	int k;

	for (k = 0; k < d->istate->cache_nr; k++) {
		struct cache_entry *ce_k = d->istate->cache[k];
		ce_k->ce_flags |= CE_HASHED;
		hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name);
		hashmap_add(&d->istate->name_hash, &ce_k->ent);
	}

	return NULL;
}

static inline void lazy_update_dir_ref_counts(
	struct index_state *istate,
	struct lazy_entry *lazy_entries)
{
	int k;

	for (k = 0; k < istate->cache_nr; k++) {
		if (lazy_entries[k].dir)
			lazy_entries[k].dir->nr++;
	}
}

static void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
	int err;
	int nr_each;
	int k_start;
	int t;
	struct lazy_entry *lazy_entries;
	struct lazy_dir_thread_data *td_dir;
	struct lazy_name_thread_data *td_name;

	if (!HAVE_THREADS)
		return;

	k_start = 0;
	nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);

	CALLOC_ARRAY(lazy_entries, istate->cache_nr);
	CALLOC_ARRAY(td_dir, lazy_nr_dir_threads);
	CALLOC_ARRAY(td_name, 1);

	init_dir_mutex();

	/*
	 * Phase 1:
	 * Build "istate->dir_hash" using n "dir" threads (and a read-only index).
	 */
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		td_dir_t->istate = istate;
		td_dir_t->lazy_entries = lazy_entries;
		td_dir_t->k_start = k_start;
		k_start += nr_each;
		if (k_start > istate->cache_nr)
			k_start = istate->cache_nr;
		td_dir_t->k_end = k_start;
		err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t);
		if (err)
			die(_("unable to create lazy_dir thread: %s"), strerror(err));
	}
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		if (pthread_join(td_dir_t->pthread, NULL))
			die("unable to join lazy_dir_thread");
	}

	/*
	 * Phase 2:
	 * Iterate over all index entries and add them to the "istate->name_hash"
	 * using a single "name" background thread.
	 * (Testing showed it wasn't worth running more than 1 thread for this.)
	 *
	 * Meanwhile, finish updating the parent directory ref-counts for each
	 * index entry using the current thread.  (This step is very fast and
	 * doesn't need threading.)
	 */
	td_name->istate = istate;
	td_name->lazy_entries = lazy_entries;
	err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name);
	if (err)
		die(_("unable to create lazy_name thread: %s"), strerror(err));

	lazy_update_dir_ref_counts(istate, lazy_entries);

	err = pthread_join(td_name->pthread, NULL);
	if (err)
		die(_("unable to join lazy_name thread: %s"), strerror(err));

	cleanup_dir_mutex();

	free(td_name);
	free(td_dir);
	free(lazy_entries);
}

static void lazy_init_name_hash(struct index_state *istate)
{
	if (istate->name_hash_initialized)
		return;
	trace_performance_enter();
	trace2_region_enter("index", "name-hash-init", istate->repo);
	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
	hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);

	if (lookup_lazy_params(istate)) {
		/*
		 * Disable item counting and automatic rehashing because
		 * we do per-chain (mod n) locking rather than whole hashmap
		 * locking and we need to prevent the table-size from changing
		 * and bucket items from being redistributed.
		 */
		hashmap_disable_item_counting(&istate->dir_hash);
		threaded_lazy_init_name_hash(istate);
		hashmap_enable_item_counting(&istate->dir_hash);
	} else {
		int nr;
		for (nr = 0; nr < istate->cache_nr; nr++)
			hash_index_entry(istate, istate->cache[nr]);
	}

	istate->name_hash_initialized = 1;
	trace2_region_leave("index", "name-hash-init", istate->repo);
	trace_performance_leave("initialize name hash");
}

/*
 * A test routine for t/helper/ sources.
 *
 * Returns the number of threads used or 0 when
 * the non-threaded code path was used.
 *
 * Requesting threading WILL NOT override guards
 * in lookup_lazy_params().
 */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded)
{
	lazy_nr_dir_threads = 0;
	lazy_try_threaded = try_threaded;

	lazy_init_name_hash(istate);

	return lazy_nr_dir_threads;
}

void add_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (istate->name_hash_initialized)
		hash_index_entry(istate, ce);
}

void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
		return;
	ce->ce_flags &= ~CE_HASHED;
	hashmap_remove(&istate->name_hash, &ce->ent, ce);

	if (ignore_case)
		remove_dir_entry(istate, ce);
}

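/*
 * Byte-at-a-time case-insensitive comparison.  toupper() is consulted
 * only when the bytes already differ, keeping the common "same case"
 * path cheap.
 */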
static int slow_same_name(const char *name1, int len1, const char *name2, int len2)
{
	if (len1 != len2)
		return 0;

	while (len1) {
		unsigned char c1 = *name1++;
		unsigned char c2 = *name2++;
		len1--;
		if (c1 != c2) {
			c1 = toupper(c1);
			c2 = toupper(c2);
			if (c1 != c2)
				return 0;
		}
	}
	return 1;
}

static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase)
{
	int len = ce_namelen(ce);

	/*
	 * Always do exact compare, even if we want a case-ignoring comparison;
	 * we do the quick exact one first, because it will be the common case.
	 */
	if (len == namelen && !memcmp(name, ce->name, len))
		return 1;

	if (!icase)
		return 0;

	return slow_same_name(name, namelen, ce->name, len);
}

int index_dir_exists(struct index_state *istate, const char *name, int namelen)
{
	struct dir_entry *dir;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, namelen, 0);
	dir = find_dir_entry(istate, name, namelen);
	return dir && dir->nr;
}

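/*
 * Rewrite each directory component of "name", in place, to the case
 * recorded for it in the index's dir_hash.  The final (file) component
 * is left untouched.
 */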
void adjust_dirname_case(struct index_state *istate, char *name)
{
	const char *startPtr = name;
	const char *ptr = startPtr;

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, strlen(name), 0);
	while (*ptr) {
		while (*ptr && *ptr != '/')
			ptr++;

		if (*ptr == '/') {
			struct dir_entry *dir;

			dir = find_dir_entry(istate, name, ptr - name);
			if (dir) {
				memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
				startPtr = ptr + 1;
			}
			ptr++;
		}
	}
}

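/*
 * Look up "name" in "istate->name_hash".  Entries that collide on the
 * (case-folding) hash share a bucket chain, so walk the chain and let
 * same_name() decide whether each candidate really matches.
 */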
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
{
	struct cache_entry *ce;
	unsigned int hash = memihash(name, namelen);

	lazy_init_name_hash(istate);
	expand_to_path(istate, name, namelen, icase);

	ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL,
					 struct cache_entry, ent);
	hashmap_for_each_entry_from(&istate->name_hash, ce, ent) {
		if (same_name(ce, name, namelen, icase))
			return ce;
	}
	return NULL;
}

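/*
 * The name_hash entries are the index's own cache_entries, so only the
 * table itself is cleared; the dir_entries are owned by the dir_hash
 * and are freed along with it.
 */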
void free_name_hash(struct index_state *istate)
{
	if (!istate->name_hash_initialized)
		return;
	istate->name_hash_initialized = 0;

	hashmap_clear(&istate->name_hash);
	hashmap_clear_and_free(&istate->dir_hash, struct dir_entry, ent);
}