/*
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "pathspec.h"
#include "dir.h"
#include "fsmonitor.h"
#include "config.h"

#ifdef NO_PTHREADS
static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	; /* nothing */
}
#else

#include <pthread.h>

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to 20 threads, and we want
 * to have at least 500 lstat's per thread for it to
 * be worth starting a thread.
 */
#define MAX_PARALLEL (20)
#define THREAD_COST (500)

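/*
 * For example, an index with 10,000 entries gets 10000 / 500 = 20
 * threads of roughly 500 entries each; a 100,000-entry index would
 * want 200 threads but is capped at 20, so each thread handles about
 * 5,000 entries.  Anything below 1,000 entries stays single-threaded
 * (unless GIT_FORCE_PRELOAD_TEST is set, see below).
 */
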
struct thread_data {
	pthread_t pthread;
	struct index_state *index;
	struct pathspec pathspec;
	int offset, nr;
};

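/*
 * Each worker walks its [offset, offset + nr) slice of the index,
 * skipping entries it cannot cheaply validate (unmerged entries,
 * gitlinks, skip-worktree entries, entries already marked up to date
 * or fsmonitor-valid, paths outside the pathspec, and paths behind a
 * symlinked directory), lstat()s the rest, and marks those whose stat
 * data still matches as up to date.
 */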
static void *preload_thread(void *_data)
{
	int nr;
	struct thread_data *p = _data;
	struct index_state *index = p->index;
	struct cache_entry **cep = index->cache + p->offset;
	struct cache_def cache = CACHE_DEF_INIT;

	nr = p->nr;
	if (nr + p->offset > index->cache_nr)
		nr = index->cache_nr - p->offset;

	do {
		struct cache_entry *ce = *cep++;
		struct stat st;

		if (ce_stage(ce))
			continue;
		if (S_ISGITLINK(ce->ce_mode))
			continue;
		if (ce_uptodate(ce))
			continue;
		if (ce_skip_worktree(ce))
			continue;
		if (ce->ce_flags & CE_FSMONITOR_VALID)
			continue;
		if (!ce_path_match(index, ce, &p->pathspec, NULL))
			continue;
		if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
			continue;
		if (lstat(ce->name, &st))
			continue;
		if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
			continue;
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(ce);
	} while (--nr > 0);
	cache_def_clear(&cache);
	return NULL;
}

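/*
 * Warm up the stat data for the index by lstat()ing entries in
 * parallel.  This only kicks in when core.preloadIndex is enabled and
 * the index is large enough to justify the threads; setting
 * GIT_FORCE_PRELOAD_TEST=1 forces two threads even on a tiny index so
 * the threaded path can be exercised in testing.
 */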
static void preload_index(struct index_state *index,
			  const struct pathspec *pathspec)
{
	int threads, i, work, offset;
	struct thread_data data[MAX_PARALLEL];
	uint64_t start = getnanotime();

	if (!core_preload_index)
		return;

	threads = index->cache_nr / THREAD_COST;
	if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_FORCE_PRELOAD_TEST", 0))
		threads = 2;
	if (threads < 2)
		return;
	if (threads > MAX_PARALLEL)
		threads = MAX_PARALLEL;
	offset = 0;
	work = DIV_ROUND_UP(index->cache_nr, threads);
	memset(&data, 0, sizeof(data));
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		p->index = index;
		if (pathspec)
			copy_pathspec(&p->pathspec, pathspec);
		p->offset = offset;
		p->nr = work;
		offset += work;
		if (pthread_create(&p->pthread, NULL, preload_thread, p))
			die("unable to create threaded lstat");
	}
	for (i = 0; i < threads; i++) {
		struct thread_data *p = data+i;
		if (pthread_join(p->pthread, NULL))
			die("unable to join threaded lstat");
	}
	trace_performance_since(start, "preload index");
}
#endif

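/*
 * Read the index and then run the threaded preload pass over it; the
 * return value is that of read_index() itself.
 */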
int read_index_preload(struct index_state *index,
		       const struct pathspec *pathspec)
{
	int retval = read_index(index);

	preload_index(index, pathspec);
	return retval;
}