#include "cache.h"
#include "repository.h"
#include "sparse-index.h"
#include "tree.h"
#include "pathspec.h"
#include "trace2.h"
#include "cache-tree.h"
#include "config.h"
#include "dir.h"
#include "fsmonitor.h"

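/*
 * Create a sparse-directory cache entry for 'sparse_dir' that carries
 * the OID of the corresponding cache-tree node and is marked with
 * CE_SKIP_WORKTREE.
 */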
static struct cache_entry *construct_sparse_dir_entry(
                                struct index_state *istate,
                                const char *sparse_dir,
                                struct cache_tree *tree)
{
        struct cache_entry *de;

        de = make_cache_entry(istate, S_IFDIR, &tree->oid, sparse_dir, 0, 0);

        de->ce_flags |= CE_SKIP_WORKTREE;
        return de;
}

/*
 * Recursively replace index entries with sparse-directory entries.
 *
 * Examine the entries in the range [start, end), which correspond to
 * the cache-tree node 'ct' at path 'ct_path'. If the path is outside
 * the sparse-checkout cone and every entry in the range is merged,
 * not a gitlink, and marked CE_SKIP_WORKTREE, the whole range is
 * replaced by a single sparse-directory entry. Otherwise, recurse
 * into the subtrees of 'ct'.
 *
 * Returns the number of entries "inserted" into the index.
 */
static int convert_to_sparse_rec(struct index_state *istate,
                                 int num_converted,
                                 int start, int end,
                                 const char *ct_path, size_t ct_pathlen,
                                 struct cache_tree *ct)
{
        int i, can_convert = 1;
        int start_converted = num_converted;
        enum pattern_match_result match;
        int dtype = DT_UNKNOWN;
        struct strbuf child_path = STRBUF_INIT;
        struct pattern_list *pl = istate->sparse_checkout_patterns;

        /*
         * Is the current path outside of the sparse cone?
         * Then check if the region can be replaced by a sparse
         * directory entry (everything is sparse and merged).
         */
        match = path_matches_pattern_list(ct_path, ct_pathlen,
                                          NULL, &dtype, pl, istate);
        if (match != NOT_MATCHED)
                can_convert = 0;

        for (i = start; can_convert && i < end; i++) {
                struct cache_entry *ce = istate->cache[i];

                if (ce_stage(ce) ||
                    S_ISGITLINK(ce->ce_mode) ||
                    !(ce->ce_flags & CE_SKIP_WORKTREE))
                        can_convert = 0;
        }

        if (can_convert) {
                struct cache_entry *se;
                se = construct_sparse_dir_entry(istate, ct_path, ct);

                istate->cache[num_converted++] = se;
                return 1;
        }

        for (i = start; i < end; ) {
                int count, span, pos = -1;
                const char *base, *slash;
                struct cache_entry *ce = istate->cache[i];

                /*
                 * Detect if this is a normal entry outside of any subtree
                 * entry.
                 */
                base = ce->name + ct_pathlen;
                slash = strchr(base, '/');

                if (slash)
                        pos = cache_tree_subtree_pos(ct, base, slash - base);

                if (pos < 0) {
                        istate->cache[num_converted++] = ce;
                        i++;
                        continue;
                }

                strbuf_setlen(&child_path, 0);
                strbuf_add(&child_path, ce->name, slash - ce->name + 1);

                span = ct->down[pos]->cache_tree->entry_count;
                count = convert_to_sparse_rec(istate,
                                              num_converted, i, i + span,
                                              child_path.buf, child_path.len,
                                              ct->down[pos]->cache_tree);
                num_converted += count;
                i += span;
        }

        strbuf_release(&child_path);
        return num_converted - start_converted;
}

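/*
 * Persist the "index.sparse" setting in the worktree-specific config
 * file (config.worktree) and update the in-memory repo settings to
 * match.
 */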
int set_sparse_index_config(struct repository *repo, int enable)
{
        int res;
        char *config_path = repo_git_path(repo, "config.worktree");
        res = git_config_set_in_file_gently(config_path,
                                            "index.sparse",
                                            enable ? "true" : NULL);
        free(config_path);

        prepare_repo_settings(repo);
        repo->settings.sparse_index = enable;
        return res;
}

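/*
 * Returns 1 if any index entry has a nonzero stage, i.e. the index
 * contains unmerged (conflicted) entries.
 */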
static int index_has_unmerged_entries(struct index_state *istate)
{
        int i;
        for (i = 0; i < istate->cache_nr; i++) {
                if (ce_stage(istate->cache[i]))
                        return 1;
        }

        return 0;
}

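/*
 * Convert an in-memory full index into a sparse index, collapsing
 * regions that lie entirely outside the sparse-checkout cone into
 * single sparse-directory entries. This is a no-op (returning 0) when
 * the index is already sparse, the split-index feature is in use,
 * cone-mode sparse-checkout is not enabled, index.sparse is false, or
 * the index contains unmerged entries. Returns -1 if the conversion
 * cannot proceed (non-cone patterns or a failed cache-tree update).
 */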
int convert_to_sparse(struct index_state *istate)
{
        int test_env;
        if (istate->split_index || istate->sparse_index ||
            !core_apply_sparse_checkout || !core_sparse_checkout_cone)
                return 0;

        if (!istate->repo)
                istate->repo = the_repository;

        /*
         * The GIT_TEST_SPARSE_INDEX environment variable, when set,
         * forces the index.sparse config setting to its boolean value.
         */
        test_env = git_env_bool("GIT_TEST_SPARSE_INDEX", -1);
        if (test_env >= 0)
                set_sparse_index_config(istate->repo, test_env);

        /*
         * Only convert to sparse if index.sparse is set.
         */
        prepare_repo_settings(istate->repo);
        if (!istate->repo->settings.sparse_index)
                return 0;

        if (!istate->sparse_checkout_patterns) {
                istate->sparse_checkout_patterns = xcalloc(1, sizeof(struct pattern_list));
                if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0)
                        return 0;
        }

        if (!istate->sparse_checkout_patterns->use_cone_patterns) {
                warning(_("attempting to use sparse-index without cone mode"));
                return -1;
        }

        /*
         * NEEDSWORK: If we have unmerged entries, then stay full.
         * Unmerged entries prevent the cache-tree extension from working.
         */
        if (index_has_unmerged_entries(istate))
                return 0;

        /* Clear and recompute the cache-tree */
        cache_tree_free(&istate->cache_tree);
        if (cache_tree_update(istate, 0)) {
                warning(_("unable to update cache-tree, staying full"));
                return -1;
        }

        remove_fsmonitor(istate);

        trace2_region_enter("index", "convert_to_sparse", istate->repo);
        istate->cache_nr = convert_to_sparse_rec(istate,
                                                 0, 0, istate->cache_nr,
                                                 "", 0, istate->cache_tree);

        /* Clear and recompute the cache-tree */
        cache_tree_free(&istate->cache_tree);
        cache_tree_update(istate, 0);

        istate->fsmonitor_has_run_once = 0;
        FREE_AND_NULL(istate->fsmonitor_dirty);
        FREE_AND_NULL(istate->fsmonitor_last_update);

        istate->sparse_index = 1;
        trace2_region_leave("index", "convert_to_sparse", istate->repo);
        return 0;
}

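/*
 * Place 'ce' at position 'nr' in the index, growing the cache array as
 * needed and registering the entry in the name hash.
 */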
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
        ALLOC_GROW(istate->cache, nr + 1, istate->cache_alloc);

        istate->cache[nr] = ce;
        add_name_hash(istate, ce);
}

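/*
 * Callback for read_tree_at(): append a skip-worktree cache entry for
 * every non-tree object found while walking a sparse directory's tree.
 * The 'context' pointer is the index_state being populated.
 */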
static int add_path_to_index(const struct object_id *oid,
                             struct strbuf *base, const char *path,
                             unsigned int mode, void *context)
{
        struct index_state *istate = (struct index_state *)context;
        struct cache_entry *ce;
        size_t len = base->len;

        if (S_ISDIR(mode))
                return READ_TREE_RECURSIVE;

        strbuf_addstr(base, path);

        ce = make_cache_entry(istate, mode, oid, base->buf, 0, 0);
        ce->ce_flags |= CE_SKIP_WORKTREE | CE_EXTENDED;
        set_index_entry(istate, istate->cache_nr++, ce);

        strbuf_setlen(base, len);
        return 0;
}

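/*
 * Expand a sparse index back into a full one: every sparse-directory
 * entry is replaced by the individual entries of the tree it points
 * to, each marked with CE_SKIP_WORKTREE. No-op if the index is not
 * sparse.
 */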
void ensure_full_index(struct index_state *istate)
{
        int i;
        struct index_state *full;
        struct strbuf base = STRBUF_INIT;

        if (!istate || !istate->sparse_index)
                return;

        if (!istate->repo)
                istate->repo = the_repository;

        trace2_region_enter("index", "ensure_full_index", istate->repo);

        /* initialize basics of new index */
        full = xcalloc(1, sizeof(struct index_state));
        memcpy(full, istate, sizeof(struct index_state));

        /* then change the necessary things */
        full->sparse_index = 0;
        full->cache_alloc = (3 * istate->cache_alloc) / 2;
        full->cache_nr = 0;
        ALLOC_ARRAY(full->cache, full->cache_alloc);

        for (i = 0; i < istate->cache_nr; i++) {
                struct cache_entry *ce = istate->cache[i];
                struct tree *tree;
                struct pathspec ps;

                if (!S_ISSPARSEDIR(ce->ce_mode)) {
                        set_index_entry(full, full->cache_nr++, ce);
                        continue;
                }
                if (!(ce->ce_flags & CE_SKIP_WORKTREE))
                        warning(_("index entry is a directory, but not sparse (%08x)"),
                                ce->ce_flags);

                /* recursively walk into ce->name */
                tree = lookup_tree(istate->repo, &ce->oid);

                memset(&ps, 0, sizeof(ps));
                ps.recursive = 1;
                ps.has_wildcard = 1;
                ps.max_depth = -1;

                strbuf_setlen(&base, 0);
                strbuf_add(&base, ce->name, strlen(ce->name));

                read_tree_at(istate->repo, tree, &base, &ps,
                             add_path_to_index, full);

                /* free directory entries. full entries are re-used */
                discard_cache_entry(ce);
        }

        /* Copy back into original index. */
        memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash));
        memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash));
        istate->sparse_index = 0;
        free(istate->cache);
        istate->cache = full->cache;
        istate->cache_nr = full->cache_nr;
        istate->cache_alloc = full->cache_alloc;
        istate->fsmonitor_has_run_once = 0;
        FREE_AND_NULL(istate->fsmonitor_dirty);
        FREE_AND_NULL(istate->fsmonitor_last_update);

        strbuf_release(&base);
        free(full);

        /* Clear and recompute the cache-tree */
        cache_tree_free(&istate->cache_tree);
        cache_tree_update(istate, 0);

        trace2_region_leave("index", "ensure_full_index", istate->repo);
}

/*
 * This static global helps avoid infinite recursion between
 * expand_to_path() and index_file_exists().
 */
static int in_expand_to_path = 0;

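/*
 * If 'path' is not in the index but one of its parent directories is
 * a sparse-directory entry, expand the index so the path's real state
 * can be examined. The name-hash lookups honor 'icase' for
 * case-insensitive matching.
 */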
void expand_to_path(struct index_state *istate,
                    const char *path, size_t pathlen, int icase)
{
        struct strbuf path_mutable = STRBUF_INIT;
        size_t substr_len;

        /* prevent extra recursion */
        if (in_expand_to_path)
                return;

        if (!istate || !istate->sparse_index)
                return;

        if (!istate->repo)
                istate->repo = the_repository;

        in_expand_to_path = 1;

        /*
         * We only need to actually expand a region if the
         * following are both true:
         *
         * 1. 'path' is not already in the index.
         * 2. Some parent directory of 'path' is a sparse directory.
         */

        if (index_file_exists(istate, path, pathlen, icase))
                goto cleanup;

        strbuf_add(&path_mutable, path, pathlen);
        strbuf_addch(&path_mutable, '/');

        /* Check the name hash for all parent directories */
        substr_len = 0;
        while (substr_len < pathlen) {
                char temp;
                char *replace = strchr(path_mutable.buf + substr_len, '/');

                if (!replace)
                        break;

                /* replace the character _after_ the slash */
                replace++;
                temp = *replace;
                *replace = '\0';
                if (index_file_exists(istate, path_mutable.buf,
                                      path_mutable.len, icase)) {
                        /*
                         * We found a parent directory in the name-hash
                         * hashtable, because only sparse directory entries
                         * have a trailing '/' character. Since "path" wasn't
                         * in the index, perhaps it exists within this
                         * sparse-directory. Expand accordingly.
                         */
                        ensure_full_index(istate);
                        break;
                }

                *replace = temp;
                substr_len = replace - path_mutable.buf;
        }

cleanup:
        strbuf_release(&path_mutable);
        in_expand_to_path = 0;
}