#include "cache.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "object-store.h"
#include "replace-object.h"
#include "promisor-remote.h"

#ifndef DEBUG_CACHE_TREE
#define DEBUG_CACHE_TREE 0
#endif

struct cache_tree *cache_tree(void)
{
	struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
	it->entry_count = -1;
	return it;
}

void cache_tree_free(struct cache_tree **it_p)
{
	int i;
	struct cache_tree *it = *it_p;

	if (!it)
		return;
	for (i = 0; i < it->subtree_nr; i++)
		if (it->down[i]) {
			cache_tree_free(&it->down[i]->cache_tree);
			free(it->down[i]);
		}
	free(it->down);
	free(it);
	*it_p = NULL;
}

static int subtree_name_cmp(const char *one, int onelen,
			    const char *two, int twolen)
{
	if (onelen < twolen)
		return -1;
	if (twolen < onelen)
		return 1;
	return memcmp(one, two, onelen);
}

static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
{
	struct cache_tree_sub **down = it->down;
	int lo, hi;
	lo = 0;
	hi = it->subtree_nr;
	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;
		struct cache_tree_sub *mdl = down[mi];
		int cmp = subtree_name_cmp(path, pathlen,
					   mdl->name, mdl->namelen);
		if (!cmp)
			return mi;
		if (cmp < 0)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}
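
/*
 * A non-negative return from subtree_pos() is the index of the match
 * in it->down[]; a negative return encodes the insertion point of a
 * missing name as -(pos+1), so a caller that creates missing subtrees
 * (as find_subtree() below does) recovers the slot with:
 *
 *	int pos = subtree_pos(it, path, pathlen);
 *	if (pos < 0)
 *		pos = -pos-1;
 *
 * and inserts the new entry at down[pos], shifting the rest up.
 */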

static struct cache_tree_sub *find_subtree(struct cache_tree *it,
					   const char *path,
					   int pathlen,
					   int create)
{
	struct cache_tree_sub *down;
	int pos = subtree_pos(it, path, pathlen);
	if (0 <= pos)
		return it->down[pos];
	if (!create)
		return NULL;

	pos = -pos-1;
	ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
	it->subtree_nr++;

	FLEX_ALLOC_MEM(down, name, path, pathlen);
	down->cache_tree = NULL;
	down->namelen = pathlen;

	if (pos < it->subtree_nr)
		MOVE_ARRAY(it->down + pos + 1, it->down + pos,
			   it->subtree_nr - pos - 1);
	it->down[pos] = down;
	return down;
}

struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
	int pathlen = strlen(path);
	return find_subtree(it, path, pathlen, 1);
}

static int do_invalidate_path(struct cache_tree *it, const char *path)
{
	/* a/b/c
	 * ==> invalidate self
	 * ==> find "a", have it invalidate "b/c"
	 * a
	 * ==> invalidate self
	 * ==> if "a" exists as a subtree, remove it.
	 */
	const char *slash;
	int namelen;
	struct cache_tree_sub *down;

#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

	if (!it)
		return 0;
	slash = strchrnul(path, '/');
	namelen = slash - path;
	it->entry_count = -1;
	if (!*slash) {
		int pos;
		pos = subtree_pos(it, path, namelen);
		if (0 <= pos) {
			cache_tree_free(&it->down[pos]->cache_tree);
			free(it->down[pos]);
			/* 0 1 2 3 4 5
			 *       ^     ^subtree_nr = 6
			 *       pos
			 * move 4 and 5 up one place (2 entries)
			 * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
			 */
			MOVE_ARRAY(it->down + pos, it->down + pos + 1,
				   it->subtree_nr - pos - 1);
			it->subtree_nr--;
		}
		return 1;
	}
	down = find_subtree(it, path, namelen, 0);
	if (down)
		do_invalidate_path(down->cache_tree, slash + 1);
	return 1;
}

void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
	if (do_invalidate_path(istate->cache_tree, path))
		istate->cache_changed |= CACHE_TREE_CHANGED;
}
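
/*
 * For illustration, with cache-tree nodes for "a" and "a/b", a call
 *
 *	cache_tree_invalidate_path(istate, "a/b/c");
 *
 * marks the root, "a" and "a/b" invalid (entry_count = -1) and, if a
 * subtree named "c" exists under "a/b", removes it outright, as the
 * comment in do_invalidate_path() above describes.
 */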

static int verify_cache(struct cache_entry **cache,
			int entries, int flags)
{
	int i, funny;
	int silent = flags & WRITE_TREE_SILENT;

	/* Verify that the tree is merged */
	funny = 0;
	for (i = 0; i < entries; i++) {
		const struct cache_entry *ce = cache[i];
		if (ce_stage(ce)) {
			if (silent)
				return -1;
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "%s: unmerged (%s)\n",
				ce->name, oid_to_hex(&ce->oid));
		}
	}
	if (funny)
		return -1;

	/* Also verify that the cache does not have path and path/file
	 * at the same time.  At this point we know the cache has only
	 * stage 0 entries.
	 */
	funny = 0;
	for (i = 0; i < entries - 1; i++) {
		/* path/file always comes after path because of the way
		 * the cache is sorted.  Also a path can appear only once,
		 * which means a conflicting entry would immediately follow.
		 */
		const char *this_name = cache[i]->name;
		const char *next_name = cache[i+1]->name;
		int this_len = strlen(this_name);
		if (this_len < strlen(next_name) &&
		    strncmp(this_name, next_name, this_len) == 0 &&
		    next_name[this_len] == '/') {
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "You have both %s and %s\n",
				this_name, next_name);
		}
	}
	if (funny)
		return -1;
	return 0;
}
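
/*
 * A made-up example of the directory/file conflict that the second
 * loop above rejects: an index sorted as
 *
 *	Makefile
 *	lib
 *	lib/foo.c
 *
 * places the blob "lib" immediately before "lib/foo.c", so comparing
 * only adjacent pairs is enough to catch every such conflict.
 */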

static void discard_unused_subtrees(struct cache_tree *it)
{
	struct cache_tree_sub **down = it->down;
	int nr = it->subtree_nr;
	int dst, src;
	for (dst = src = 0; src < nr; src++) {
		struct cache_tree_sub *s = down[src];
		if (s->used)
			down[dst++] = s;
		else {
			cache_tree_free(&s->cache_tree);
			free(s);
			it->subtree_nr--;
		}
	}
}

int cache_tree_fully_valid(struct cache_tree *it)
{
	int i;
	if (!it)
		return 0;
	if (it->entry_count < 0 || !has_object_file(&it->oid))
		return 0;
	for (i = 0; i < it->subtree_nr; i++) {
		if (!cache_tree_fully_valid(it->down[i]->cache_tree))
			return 0;
	}
	return 1;
}

static int update_one(struct cache_tree *it,
		      struct cache_entry **cache,
		      int entries,
		      const char *base,
		      int baselen,
		      int *skip_count,
		      int flags)
{
	struct strbuf buffer;
	int missing_ok = flags & WRITE_TREE_MISSING_OK;
	int dryrun = flags & WRITE_TREE_DRY_RUN;
	int repair = flags & WRITE_TREE_REPAIR;
	int to_invalidate = 0;
	int i;

	assert(!(dryrun && repair));

	*skip_count = 0;

	if (0 <= it->entry_count && has_object_file(&it->oid))
		return it->entry_count;

	/*
	 * We first scan for subtrees and update them; we start by
	 * marking existing subtrees -- the ones that are unmarked
	 * should not be in the result.
	 */
	for (i = 0; i < it->subtree_nr; i++)
		it->down[i]->used = 0;

	/*
	 * Find the subtrees and update them.
	 */
	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub;
		const char *path, *slash;
		int pathlen, sublen, subcnt, subskip;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (!slash) {
			i++;
			continue;
		}
		/*
		 * a/bbb/c (base = a/, slash = /c)
		 * ==>
		 * path+baselen = bbb/c, sublen = 3
		 */
		sublen = slash - (path + baselen);
		sub = find_subtree(it, path + baselen, sublen, 1);
		if (!sub->cache_tree)
			sub->cache_tree = cache_tree();
		subcnt = update_one(sub->cache_tree,
				    cache + i, entries - i,
				    path,
				    baselen + sublen + 1,
				    &subskip,
				    flags);
		if (subcnt < 0)
			return subcnt;
		if (!subcnt)
			die("index cache-tree records empty sub-tree");
		i += subcnt;
		sub->count = subcnt; /* to be used in the next loop */
		*skip_count += subskip;
		sub->used = 1;
	}

	discard_unused_subtrees(it);

	/*
	 * Then write out the tree object for this level.
	 */
	strbuf_init(&buffer, 8192);

	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub = NULL;
		const char *path, *slash;
		int pathlen, entlen;
		const struct object_id *oid;
		unsigned mode;
		int expected_missing = 0;
		int contains_ita = 0;
		int ce_missing_ok;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (slash) {
			entlen = slash - (path + baselen);
			sub = find_subtree(it, path + baselen, entlen, 0);
			if (!sub)
				die("cache-tree.c: '%.*s' in '%s' not found",
				    entlen, path + baselen, path);
			i += sub->count;
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			contains_ita = sub->cache_tree->entry_count < 0;
			if (contains_ita) {
				to_invalidate = 1;
				expected_missing = 1;
			}
		}
		else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = pathlen - baselen;
			i++;
		}

		ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
			(has_promisor_remote() &&
			 ce_skip_worktree(ce));
		if (is_null_oid(oid) ||
		    (!ce_missing_ok && !has_object_file(oid))) {
			strbuf_release(&buffer);
			if (expected_missing)
				return -1;
			return error("invalid object %06o %s for '%.*s'",
				mode, oid_to_hex(oid), entlen+baselen, path);
		}

		/*
		 * CE_REMOVE entries are removed before the index is
		 * written to disk.  Skip them to remain consistent
		 * with the future on-disk index.
		 */
		if (ce->ce_flags & CE_REMOVE) {
			*skip_count = *skip_count + 1;
			continue;
		}

		/*
		 * CE_INTENT_TO_ADD entries exist in the on-disk index but
		 * are not part of generated trees.  Invalidate up to the
		 * root to force cache-tree users to read elsewhere.
		 */
		if (!sub && ce_intent_to_add(ce)) {
			to_invalidate = 1;
			continue;
		}

		/*
		 * "sub" can be an empty tree if all subentries are i-t-a.
		 */
		if (contains_ita && is_empty_tree_oid(oid))
			continue;

		strbuf_grow(&buffer, entlen + 100);
		strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
		strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

#if DEBUG_CACHE_TREE
		fprintf(stderr, "cache-tree update-one %o %.*s\n",
			mode, entlen, path + baselen);
#endif
	}

	if (repair) {
		struct object_id oid;
		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
				 tree_type, &oid);
		if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
			oidcpy(&it->oid, &oid);
		else
			to_invalidate = 1;
	} else if (dryrun) {
		hash_object_file(the_hash_algo, buffer.buf, buffer.len,
				 tree_type, &it->oid);
	} else if (write_object_file(buffer.buf, buffer.len, tree_type,
				     &it->oid)) {
		strbuf_release(&buffer);
		return -1;
	}

	strbuf_release(&buffer);
	it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
		it->entry_count, it->subtree_nr,
		oid_to_hex(&it->oid));
#endif
	return i;
}
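
/*
 * To make the bookkeeping in update_one() concrete, take a
 * hypothetical index of
 *
 *	Makefile
 *	lib/bar.c
 *	lib/foo.c
 *
 * The root-level call recurses into "lib", whose call consumes two
 * index entries and returns 2; the root level then emits one blob
 * entry ("Makefile") and one tree entry ("lib"), and returns 3, which
 * becomes the root's entry_count.
 */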

int cache_tree_update(struct index_state *istate, int flags)
{
	struct cache_tree *it = istate->cache_tree;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	int skip, i = verify_cache(cache, entries, flags);

	if (i)
		return i;
	trace_performance_enter();
	i = update_one(it, cache, entries, "", 0, &skip, flags);
	trace_performance_leave("cache_tree_update");
	if (i < 0)
		return i;
	istate->cache_changed |= CACHE_TREE_CHANGED;
	return 0;
}

static void write_one(struct strbuf *buffer, struct cache_tree *it,
		      const char *path, int pathlen)
{
	int i;

	/* One "cache-tree" entry consists of the following:
	 * path (NUL terminated)
	 * entry_count, subtree_nr ("%d %d\n")
	 * tree-sha1 (missing if invalid)
	 * subtree_nr "cache-tree" entries for subtrees.
	 */
	strbuf_grow(buffer, pathlen + 100);
	strbuf_add(buffer, path, pathlen);
	strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
			pathlen, path, it->entry_count, it->subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
			pathlen, path, it->subtree_nr);
#endif

	if (0 <= it->entry_count) {
		strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
	}
	for (i = 0; i < it->subtree_nr; i++) {
		struct cache_tree_sub *down = it->down[i];
		if (i) {
			struct cache_tree_sub *prev = it->down[i-1];
			if (subtree_name_cmp(down->name, down->namelen,
					     prev->name, prev->namelen) <= 0)
				die("fatal - unsorted cache subtree");
		}
		write_one(buffer, down->cache_tree, down->name, down->namelen);
	}
}

void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
	write_one(sb, root, "", 0);
}
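
/*
 * As an illustration of the format documented in write_one(), a valid
 * root covering 3 index entries with a single valid subtree "lib"
 * covering 2 of them serializes (with a 20-byte SHA-1) as
 *
 *	"\0"     "3 1\n" <20-byte root tree hash>
 *	"lib\0"  "2 0\n" <20-byte lib tree hash>
 *
 * An invalidated level records entry_count -1 and omits its hash,
 * which is why read_one() below reads the hash only when
 * "0 <= it->entry_count".
 */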

static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
	const char *buf = *buffer;
	unsigned long size = *size_p;
	const char *cp;
	char *ep;
	struct cache_tree *it;
	int i, subtree_nr;
	const unsigned rawsz = the_hash_algo->rawsz;

	it = NULL;
	/* skip name, but make sure name exists */
	while (size && *buf) {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	it = cache_tree();

	cp = buf;
	it->entry_count = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	cp = ep;
	subtree_nr = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	while (size && *buf && *buf != '\n') {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	if (0 <= it->entry_count) {
		if (size < rawsz)
			goto free_return;
		oidread(&it->oid, (const unsigned char *)buf);
		buf += rawsz;
		size -= rawsz;
	}

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
			*buffer, it->entry_count, subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
			*buffer, subtree_nr);
#endif

	/*
	 * Just a heuristic -- we do not add directories that often but
	 * we do not want to have to extend it immediately when we do,
	 * hence +2.
	 */
	it->subtree_alloc = subtree_nr + 2;
	it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
	for (i = 0; i < subtree_nr; i++) {
		/* read each subtree */
		struct cache_tree *sub;
		struct cache_tree_sub *subtree;
		const char *name = buf;

		sub = read_one(&buf, &size);
		if (!sub)
			goto free_return;
		subtree = cache_tree_sub(it, name);
		subtree->cache_tree = sub;
	}
	if (subtree_nr != it->subtree_nr)
		die("cache-tree: internal error");
	*buffer = buf;
	*size_p = size;
	return it;

 free_return:
	cache_tree_free(&it);
	return NULL;
}

struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
{
	if (buffer[0])
		return NULL; /* not the whole tree */
	return read_one(&buffer, &size);
}

static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
	if (!it)
		return NULL;
	while (*path) {
		const char *slash;
		struct cache_tree_sub *sub;

		slash = strchrnul(path, '/');
		/*
		 * Between path and slash is the name of the subtree
		 * to look for.
		 */
		sub = find_subtree(it, path, slash - path, 0);
		if (!sub)
			return NULL;
		it = sub->cache_tree;

		path = slash;
		while (*path == '/')
			path++;
	}
	return it;
}
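
/*
 * For instance, cache_tree_find(root, "a/b") looks up the subtree "a"
 * in the root, then "b" inside it, and returns b's cache_tree, or NULL
 * as soon as a component is missing.  Redundant slashes as in "a//b"
 * are skipped by the *path == '/' loop above.
 */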

static int write_index_as_tree_internal(struct object_id *oid,
					struct index_state *index_state,
					int cache_tree_valid,
					int flags,
					const char *prefix)
{
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE) {
		cache_tree_free(&index_state->cache_tree);
		cache_tree_valid = 0;
	}

	if (!index_state->cache_tree)
		index_state->cache_tree = cache_tree();

	if (!cache_tree_valid && cache_tree_update(index_state, flags) < 0)
		return WRITE_TREE_UNMERGED_INDEX;

	if (prefix) {
		struct cache_tree *subtree;
		subtree = cache_tree_find(index_state->cache_tree, prefix);
		if (!subtree)
			return WRITE_TREE_PREFIX_ERROR;
		oidcpy(oid, &subtree->oid);
	}
	else
		oidcpy(oid, &index_state->cache_tree->oid);

	return 0;
}

struct tree* write_in_core_index_as_tree(struct repository *repo) {
	struct object_id o;
	int was_valid, ret;

	struct index_state *index_state = repo->index;
	was_valid = index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(&o, index_state, was_valid, 0, NULL);
	if (ret == WRITE_TREE_UNMERGED_INDEX) {
		int i;
		fprintf(stderr, "BUG: There are unmerged index entries:\n");
		for (i = 0; i < index_state->cache_nr; i++) {
			const struct cache_entry *ce = index_state->cache[i];
			if (ce_stage(ce))
				fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
					(int)ce_namelen(ce), ce->name);
		}
		BUG("unmerged index entries when writing inmemory index");
	}

	return lookup_tree(repo, &index_state->cache_tree->oid);
}


int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid;
	struct lock_file lock_file = LOCK_INIT;
	int ret;

	hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path, get_git_dir());
	if (entries < 0) {
		ret = WRITE_TREE_UNREADABLE_INDEX;
		goto out;
	}

	was_valid = !(flags & WRITE_TREE_IGNORE_CACHE_TREE) &&
		    index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(oid, index_state, was_valid, flags,
					   prefix);
	if (!ret && !was_valid) {
		write_locked_index(index_state, &lock_file, COMMIT_LOCK);
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

 out:
	rollback_lock_file(&lock_file);
	return ret;
}

static void prime_cache_tree_rec(struct repository *r,
				 struct cache_tree *it,
				 struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	oidcpy(&it->oid, &tree->object.oid);
	init_tree_desc(&desc, tree->buffer, tree->size);
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(r, &entry.oid);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(r, sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}

void prime_cache_tree(struct repository *r,
		      struct index_state *istate,
		      struct tree *tree)
{
	cache_tree_free(&istate->cache_tree);
	istate->cache_tree = cache_tree();
	prime_cache_tree_rec(r, istate->cache_tree, tree);
	istate->cache_changed |= CACHE_TREE_CHANGED;
}
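
/*
 * Priming is the inverse of cache_tree_update(): rather than hashing
 * index entries into new tree objects, it walks an already-existing
 * tree (e.g. one whose content the index is known to match exactly)
 * and records each level's oid and entry count, so that a later
 * write-tree can reuse them without hashing anything.
 */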

/*
 * Find the cache_tree that corresponds to the current level without
 * exploding the full path into textual form.  The root of the
 * cache tree is given as "root", and our current level is "info".
 * (1) When at root level, info->prev is NULL, so it is "root" itself.
 * (2) Otherwise, find the cache_tree that corresponds to one level
 *     above us, and find ourselves in there.
 */
static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
							 struct traverse_info *info)
{
	struct cache_tree *our_parent;

	if (!info->prev)
		return root;
	our_parent = find_cache_tree_from_traversal(root, info->prev);
	return cache_tree_find(our_parent, info->name);
}

int cache_tree_matches_traversal(struct cache_tree *root,
				 struct name_entry *ent,
				 struct traverse_info *info)
{
	struct cache_tree *it;

	it = find_cache_tree_from_traversal(root, info);
	it = cache_tree_find(it, ent->path);
	if (it && it->entry_count > 0 && oideq(&ent->oid, &it->oid))
		return it->entry_count;
	return 0;
}

static void verify_one(struct repository *r,
		       struct index_state *istate,
		       struct cache_tree *it,
		       struct strbuf *path)
{
	int i, pos, len = path->len;
	struct strbuf tree_buf = STRBUF_INIT;
	struct object_id new_oid;

	for (i = 0; i < it->subtree_nr; i++) {
		strbuf_addf(path, "%s/", it->down[i]->name);
		verify_one(r, istate, it->down[i]->cache_tree, path);
		strbuf_setlen(path, len);
	}

	if (it->entry_count < 0 ||
	    /* no verification on tests (t7003) that replace trees */
	    lookup_replace_object(r, &it->oid) != &it->oid)
		return;

	if (path->len) {
		pos = index_name_pos(istate, path->buf, path->len);
		pos = -pos - 1;
	} else {
		pos = 0;
	}

	i = 0;
	while (i < it->entry_count) {
		struct cache_entry *ce = istate->cache[pos + i];
		const char *slash;
		struct cache_tree_sub *sub = NULL;
		const struct object_id *oid;
		const char *name;
		unsigned mode;
		int entlen;

		if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE))
			BUG("%s with flags 0x%x should not be in cache-tree",
			    ce->name, ce->ce_flags);
		name = ce->name + path->len;
		slash = strchr(name, '/');
		if (slash) {
			entlen = slash - name;
			sub = find_subtree(it, ce->name + path->len, entlen, 0);
			if (!sub || sub->cache_tree->entry_count < 0)
				BUG("bad subtree '%.*s'", entlen, name);
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			i += sub->cache_tree->entry_count;
		} else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = ce_namelen(ce) - path->len;
			i++;
		}
		strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
		strbuf_add(&tree_buf, oid->hash, r->hash_algo->rawsz);
	}
	hash_object_file(r->hash_algo, tree_buf.buf, tree_buf.len, tree_type,
			 &new_oid);
	if (!oideq(&new_oid, &it->oid))
		BUG("cache-tree for path %.*s does not match. "
		    "Expected %s got %s", len, path->buf,
		    oid_to_hex(&new_oid), oid_to_hex(&it->oid));
	strbuf_setlen(path, len);
	strbuf_release(&tree_buf);
}

void cache_tree_verify(struct repository *r, struct index_state *istate)
{
	struct strbuf path = STRBUF_INIT;

	if (!istate->cache_tree)
		return;
	verify_one(r, istate, istate->cache_tree, &path);
	strbuf_release(&path);
}