read-cache.c (Git 2.23.3, thirdparty/git.git)
1 /*
2 * GIT - The information manager from hell
3 *
4 * Copyright (C) Linus Torvalds, 2005
5 */
6 #include "cache.h"
7 #include "config.h"
8 #include "diff.h"
9 #include "diffcore.h"
10 #include "tempfile.h"
11 #include "lockfile.h"
12 #include "cache-tree.h"
13 #include "refs.h"
14 #include "dir.h"
15 #include "object-store.h"
16 #include "tree.h"
17 #include "commit.h"
18 #include "blob.h"
19 #include "resolve-undo.h"
20 #include "run-command.h"
21 #include "strbuf.h"
22 #include "varint.h"
23 #include "split-index.h"
24 #include "utf8.h"
25 #include "fsmonitor.h"
26 #include "thread-utils.h"
27 #include "progress.h"
28
29 /* Mask for the name length in ce_flags in the on-disk index */
30
31 #define CE_NAMEMASK (0x0fff)
32
33 /* Index extensions.
34 *
35 * The first letter should be 'A'..'Z' for extensions that are not
36 * necessary for a correct operation (i.e. optimization data).
37 * When a new extension is added that _needs_ to be understood in
38 * order to correctly interpret the index file, pick a character
39 * outside that range, to cause the reader to abort.
40 */
41
42 #define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
43 #define CACHE_EXT_TREE 0x54524545 /* "TREE" */
44 #define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
45 #define CACHE_EXT_LINK 0x6c696e6b /* "link" */
46 #define CACHE_EXT_UNTRACKED 0x554E5452 /* "UNTR" */
47 #define CACHE_EXT_FSMONITOR 0x46534D4E /* "FSMN" */
48 #define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945 /* "EOIE" */
49 #define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
50
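/*
 * As a quick illustration, CACHE_EXT() packs the four signature
 * characters big-endian, so the constants above can be compared
 * directly against the 32-bit value read from an extension header:
 *
 *     CACHE_EXT("TREE") == ('T' << 24) | ('R' << 16) | ('E' << 8) | 'E'
 *                       == 0x54524545 == CACHE_EXT_TREE
 */
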
51 /* changes that can be kept in $GIT_DIR/index (basically all extensions) */
52 #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
53 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
54 SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
55
56
57 /*
58 * This is an estimate of the pathname length in the index. We use
59 * this for V4 index files to guess the un-deltafied size of the index
60 * in memory because of pathname deltafication. This is not required
61 * for V2/V3 index formats because their pathnames are not compressed.
62 * If the initial amount of memory set aside is not sufficient, the
63 * mem pool will allocate extra memory.
64 */
65 #define CACHE_ENTRY_PATH_LENGTH 80
66
67 static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
68 {
69 struct cache_entry *ce;
70 ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
71 ce->mem_pool_allocated = 1;
72 return ce;
73 }
74
75 static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
76 {
77 struct cache_entry * ce;
78 ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
79 ce->mem_pool_allocated = 1;
80 return ce;
81 }
82
83 static struct mem_pool *find_mem_pool(struct index_state *istate)
84 {
85 struct mem_pool **pool_ptr;
86
87 if (istate->split_index && istate->split_index->base)
88 pool_ptr = &istate->split_index->base->ce_mem_pool;
89 else
90 pool_ptr = &istate->ce_mem_pool;
91
92 if (!*pool_ptr)
93 mem_pool_init(pool_ptr, 0);
94
95 return *pool_ptr;
96 }
97
98 static const char *alternate_index_output;
99
100 static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
101 {
102 istate->cache[nr] = ce;
103 add_name_hash(istate, ce);
104 }
105
106 static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
107 {
108 struct cache_entry *old = istate->cache[nr];
109
110 replace_index_entry_in_base(istate, old, ce);
111 remove_name_hash(istate, old);
112 discard_cache_entry(old);
113 ce->ce_flags &= ~CE_HASHED;
114 set_index_entry(istate, nr, ce);
115 ce->ce_flags |= CE_UPDATE_IN_BASE;
116 mark_fsmonitor_invalid(istate, ce);
117 istate->cache_changed |= CE_ENTRY_CHANGED;
118 }
119
120 void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
121 {
122 struct cache_entry *old_entry = istate->cache[nr], *new_entry;
123 int namelen = strlen(new_name);
124
125 new_entry = make_empty_cache_entry(istate, namelen);
126 copy_cache_entry(new_entry, old_entry);
127 new_entry->ce_flags &= ~CE_HASHED;
128 new_entry->ce_namelen = namelen;
129 new_entry->index = 0;
130 memcpy(new_entry->name, new_name, namelen + 1);
131
132 cache_tree_invalidate_path(istate, old_entry->name);
133 untracked_cache_remove_from_index(istate, old_entry->name);
134 remove_index_entry_at(istate, nr);
135 add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
136 }
137
138 void fill_stat_data(struct stat_data *sd, struct stat *st)
139 {
140 sd->sd_ctime.sec = (unsigned int)st->st_ctime;
141 sd->sd_mtime.sec = (unsigned int)st->st_mtime;
142 sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
143 sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
144 sd->sd_dev = st->st_dev;
145 sd->sd_ino = st->st_ino;
146 sd->sd_uid = st->st_uid;
147 sd->sd_gid = st->st_gid;
148 sd->sd_size = st->st_size;
149 }
150
151 int match_stat_data(const struct stat_data *sd, struct stat *st)
152 {
153 int changed = 0;
154
155 if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
156 changed |= MTIME_CHANGED;
157 if (trust_ctime && check_stat &&
158 sd->sd_ctime.sec != (unsigned int)st->st_ctime)
159 changed |= CTIME_CHANGED;
160
161 #ifdef USE_NSEC
162 if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
163 changed |= MTIME_CHANGED;
164 if (trust_ctime && check_stat &&
165 sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
166 changed |= CTIME_CHANGED;
167 #endif
168
169 if (check_stat) {
170 if (sd->sd_uid != (unsigned int) st->st_uid ||
171 sd->sd_gid != (unsigned int) st->st_gid)
172 changed |= OWNER_CHANGED;
173 if (sd->sd_ino != (unsigned int) st->st_ino)
174 changed |= INODE_CHANGED;
175 }
176
177 #ifdef USE_STDEV
178 /*
179 * st_dev breaks on network filesystems where different
180 * clients will have different views of what "device"
181 * the filesystem is on
182 */
183 if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
184 changed |= INODE_CHANGED;
185 #endif
186
187 if (sd->sd_size != (unsigned int) st->st_size)
188 changed |= DATA_CHANGED;
189
190 return changed;
191 }
192
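/*
 * A minimal sketch of how the two helpers above pair up:
 * fill_stat_data() snapshots the stat fields when an entry is written,
 * and match_stat_data() later compares a fresh lstat(2) result against
 * that snapshot, returning a bitmask of *_CHANGED flags (0 meaning
 * "looks unchanged"):
 *
 *     struct stat st;
 *     if (!lstat(ce->name, &st)) {
 *         unsigned int changed = match_stat_data(&ce->ce_stat_data, &st);
 *         if (changed & DATA_CHANGED)
 *             ... the size differs, so the content must be re-hashed ...
 *     }
 */
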
193 /*
194 * This only updates the "non-critical" parts of the directory
195 * cache, i.e. the parts that aren't tracked by GIT and are only
196 * used to validate the cache.
197 */
198 void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st)
199 {
200 fill_stat_data(&ce->ce_stat_data, st);
201
202 if (assume_unchanged)
203 ce->ce_flags |= CE_VALID;
204
205 if (S_ISREG(st->st_mode)) {
206 ce_mark_uptodate(ce);
207 mark_fsmonitor_valid(istate, ce);
208 }
209 }
210
211 static int ce_compare_data(struct index_state *istate,
212 const struct cache_entry *ce,
213 struct stat *st)
214 {
215 int match = -1;
216 int fd = git_open_cloexec(ce->name, O_RDONLY);
217
218 if (fd >= 0) {
219 struct object_id oid;
220 if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
221 match = !oideq(&oid, &ce->oid);
222 /* index_fd() closed the file descriptor already */
223 }
224 return match;
225 }
226
227 static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
228 {
229 int match = -1;
230 void *buffer;
231 unsigned long size;
232 enum object_type type;
233 struct strbuf sb = STRBUF_INIT;
234
235 if (strbuf_readlink(&sb, ce->name, expected_size))
236 return -1;
237
238 buffer = read_object_file(&ce->oid, &type, &size);
239 if (buffer) {
240 if (size == sb.len)
241 match = memcmp(buffer, sb.buf, size);
242 free(buffer);
243 }
244 strbuf_release(&sb);
245 return match;
246 }
247
248 static int ce_compare_gitlink(const struct cache_entry *ce)
249 {
250 struct object_id oid;
251
252 /*
253 * We don't actually require that the .git directory
254 * under a GITLINK directory be a valid git directory. It
255 * might even be missing (in case nobody populated that
256 * sub-project).
257 *
258 * If so, we consider it always to match.
259 */
260 if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
261 return 0;
262 return !oideq(&oid, &ce->oid);
263 }
264
265 static int ce_modified_check_fs(struct index_state *istate,
266 const struct cache_entry *ce,
267 struct stat *st)
268 {
269 switch (st->st_mode & S_IFMT) {
270 case S_IFREG:
271 if (ce_compare_data(istate, ce, st))
272 return DATA_CHANGED;
273 break;
274 case S_IFLNK:
275 if (ce_compare_link(ce, xsize_t(st->st_size)))
276 return DATA_CHANGED;
277 break;
278 case S_IFDIR:
279 if (S_ISGITLINK(ce->ce_mode))
280 return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
281 /* else fallthrough */
282 default:
283 return TYPE_CHANGED;
284 }
285 return 0;
286 }
287
288 static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
289 {
290 unsigned int changed = 0;
291
292 if (ce->ce_flags & CE_REMOVE)
293 return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;
294
295 switch (ce->ce_mode & S_IFMT) {
296 case S_IFREG:
297 changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
298 /* We consider only the owner x bit to be relevant for
299 * "mode changes"
300 */
301 if (trust_executable_bit &&
302 (0100 & (ce->ce_mode ^ st->st_mode)))
303 changed |= MODE_CHANGED;
304 break;
305 case S_IFLNK:
306 if (!S_ISLNK(st->st_mode) &&
307 (has_symlinks || !S_ISREG(st->st_mode)))
308 changed |= TYPE_CHANGED;
309 break;
310 case S_IFGITLINK:
311 /* We ignore most of the st_xxx fields for gitlinks */
312 if (!S_ISDIR(st->st_mode))
313 changed |= TYPE_CHANGED;
314 else if (ce_compare_gitlink(ce))
315 changed |= DATA_CHANGED;
316 return changed;
317 default:
318 BUG("unsupported ce_mode: %o", ce->ce_mode);
319 }
320
321 changed |= match_stat_data(&ce->ce_stat_data, st);
322
323 /* Racily smudged entry? */
324 if (!ce->ce_stat_data.sd_size) {
325 if (!is_empty_blob_sha1(ce->oid.hash))
326 changed |= DATA_CHANGED;
327 }
328
329 return changed;
330 }
331
332 static int is_racy_stat(const struct index_state *istate,
333 const struct stat_data *sd)
334 {
335 return (istate->timestamp.sec &&
336 #ifdef USE_NSEC
337 /* nanosecond timestamped files can also be racy! */
338 (istate->timestamp.sec < sd->sd_mtime.sec ||
339 (istate->timestamp.sec == sd->sd_mtime.sec &&
340 istate->timestamp.nsec <= sd->sd_mtime.nsec))
341 #else
342 istate->timestamp.sec <= sd->sd_mtime.sec
343 #endif
344 );
345 }
346
347 int is_racy_timestamp(const struct index_state *istate,
348 const struct cache_entry *ce)
349 {
350 return (!S_ISGITLINK(ce->ce_mode) &&
351 is_racy_stat(istate, &ce->ce_stat_data));
352 }
353
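/*
 * To illustrate the check above with a concrete case: an entry counts as
 * "racily clean" when the file's mtime is not older than the timestamp
 * of the index file itself.  If the index was written at 10:00:00 and
 * ce->ce_stat_data.sd_mtime.sec is also 10:00:00, the file could have
 * been modified after the entry was recorded without changing any stat
 * field, so ie_match_stat() below does not trust the stat match and
 * re-checks the content via ce_modified_check_fs().
 */
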
354 int match_stat_data_racy(const struct index_state *istate,
355 const struct stat_data *sd, struct stat *st)
356 {
357 if (is_racy_stat(istate, sd))
358 return MTIME_CHANGED;
359 return match_stat_data(sd, st);
360 }
361
362 int ie_match_stat(struct index_state *istate,
363 const struct cache_entry *ce, struct stat *st,
364 unsigned int options)
365 {
366 unsigned int changed;
367 int ignore_valid = options & CE_MATCH_IGNORE_VALID;
368 int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
369 int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
370 int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
371
372 if (!ignore_fsmonitor)
373 refresh_fsmonitor(istate);
374 /*
375 * If it's marked as always valid in the index, it's
376 * valid whatever the checked-out copy says.
377 *
378 * skip-worktree has the same effect with higher precedence
379 */
380 if (!ignore_skip_worktree && ce_skip_worktree(ce))
381 return 0;
382 if (!ignore_valid && (ce->ce_flags & CE_VALID))
383 return 0;
384 if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
385 return 0;
386
387 /*
388 * Intent-to-add entries have not been added, so the index entry
389 * by definition never matches what is in the work tree until it
390 * actually gets added.
391 */
392 if (ce_intent_to_add(ce))
393 return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;
394
395 changed = ce_match_stat_basic(ce, st);
396
397 /*
398 * Within 1 second of this sequence:
399 * echo xyzzy >file && git-update-index --add file
400 * running this command:
401 * echo frotz >file
402 * would give a falsely clean cache entry. The mtime and
403 * length match the cache, and other stat fields do not change.
404 *
405 * We could detect this at update-index time (the cache entry
406 * being registered/updated records the same time as "now")
407 * and delay the return from git-update-index, but that would
408 * effectively mean we can make at most one commit per second,
409 * which is not acceptable. Instead, we check cache entries
410 * whose mtime are the same as the index file timestamp more
411 * carefully than others.
412 */
413 if (!changed && is_racy_timestamp(istate, ce)) {
414 if (assume_racy_is_modified)
415 changed |= DATA_CHANGED;
416 else
417 changed |= ce_modified_check_fs(istate, ce, st);
418 }
419
420 return changed;
421 }
422
423 int ie_modified(struct index_state *istate,
424 const struct cache_entry *ce,
425 struct stat *st, unsigned int options)
426 {
427 int changed, changed_fs;
428
429 changed = ie_match_stat(istate, ce, st, options);
430 if (!changed)
431 return 0;
432 /*
433 * If the mode or type has changed, there's no point in trying
434 * to refresh the entry - it's not going to match
435 */
436 if (changed & (MODE_CHANGED | TYPE_CHANGED))
437 return changed;
438
439 /*
440 * Immediately after read-tree or update-index --cacheinfo,
441 * the length field is zero, as we have never even read the
442 * lstat(2) information once, and we cannot trust DATA_CHANGED
443 * returned by ie_match_stat() which in turn was returned by
444 * ce_match_stat_basic() to signal that the filesize of the
445 * blob changed. We have to actually go to the filesystem to
446 * see if the contents match, and if so, should answer "unchanged".
447 *
448 * The logic does not apply to gitlinks, as ce_match_stat_basic()
449 * already has checked the actual HEAD from the filesystem in the
450 * subproject. If ie_match_stat() already said it is different,
451 * then we know it is.
452 */
453 if ((changed & DATA_CHANGED) &&
454 (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
455 return changed;
456
457 changed_fs = ce_modified_check_fs(istate, ce, st);
458 if (changed_fs)
459 return changed | changed_fs;
460 return 0;
461 }
462
463 int base_name_compare(const char *name1, int len1, int mode1,
464 const char *name2, int len2, int mode2)
465 {
466 unsigned char c1, c2;
467 int len = len1 < len2 ? len1 : len2;
468 int cmp;
469
470 cmp = memcmp(name1, name2, len);
471 if (cmp)
472 return cmp;
473 c1 = name1[len];
474 c2 = name2[len];
475 if (!c1 && S_ISDIR(mode1))
476 c1 = '/';
477 if (!c2 && S_ISDIR(mode2))
478 c2 = '/';
479 return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
480 }
481
482 /*
483 * df_name_compare() is identical to base_name_compare(), except it
484 * compares conflicting directory/file entries as equal. Note that
485 * while a directory name compares as equal to a regular file, they
486 * then individually compare _differently_ to a filename that has
487 * a dot after the basename (because '\0' < '.' < '/').
488 *
489 * This is used by routines that want to traverse the git namespace
490 * but then handle conflicting entries together when possible.
491 */
492 int df_name_compare(const char *name1, int len1, int mode1,
493 const char *name2, int len2, int mode2)
494 {
495 int len = len1 < len2 ? len1 : len2, cmp;
496 unsigned char c1, c2;
497
498 cmp = memcmp(name1, name2, len);
499 if (cmp)
500 return cmp;
501 /* Directories and files compare equal (same length, same name) */
502 if (len1 == len2)
503 return 0;
504 c1 = name1[len];
505 if (!c1 && S_ISDIR(mode1))
506 c1 = '/';
507 c2 = name2[len];
508 if (!c2 && S_ISDIR(mode2))
509 c2 = '/';
510 if (c1 == '/' && !c2)
511 return 0;
512 if (c2 == '/' && !c1)
513 return 0;
514 return c1 - c2;
515 }
516
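/*
 * A small worked example of the '/' trick used by the two comparison
 * functions above, say for a tree containing a directory "foo" and a
 * file "foo.c":
 *
 *     base_name_compare("foo", 3, S_IFDIR, "foo.c", 5, S_IFREG) > 0
 *         (the directory is compared as "foo/", and '/' > '.')
 *     base_name_compare("foo", 3, S_IFREG, "foo.c", 5, S_IFREG) < 0
 *         (a plain file "foo" keeps its '\0', and '\0' < '.')
 *     df_name_compare("foo", 3, S_IFDIR, "foo", 3, S_IFREG) == 0
 *         (same name and length, so a D/F conflict compares equal)
 */
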
517 int name_compare(const char *name1, size_t len1, const char *name2, size_t len2)
518 {
519 size_t min_len = (len1 < len2) ? len1 : len2;
520 int cmp = memcmp(name1, name2, min_len);
521 if (cmp)
522 return cmp;
523 if (len1 < len2)
524 return -1;
525 if (len1 > len2)
526 return 1;
527 return 0;
528 }
529
530 int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2)
531 {
532 int cmp;
533
534 cmp = name_compare(name1, len1, name2, len2);
535 if (cmp)
536 return cmp;
537
538 if (stage1 < stage2)
539 return -1;
540 if (stage1 > stage2)
541 return 1;
542 return 0;
543 }
544
545 static int index_name_stage_pos(const struct index_state *istate, const char *name, int namelen, int stage)
546 {
547 int first, last;
548
549 first = 0;
550 last = istate->cache_nr;
551 while (last > first) {
552 int next = first + ((last - first) >> 1);
553 struct cache_entry *ce = istate->cache[next];
554 int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
555 if (!cmp)
556 return next;
557 if (cmp < 0) {
558 last = next;
559 continue;
560 }
561 first = next+1;
562 }
563 return -first-1;
564 }
565
566 int index_name_pos(const struct index_state *istate, const char *name, int namelen)
567 {
568 return index_name_stage_pos(istate, name, namelen, 0);
569 }
570
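/*
 * A minimal usage sketch: index_name_pos() returns the position of the
 * stage-0 entry when the path exists, and a negative value otherwise.
 * Callers below conventionally decode a miss as
 *
 *     int pos = index_name_pos(istate, path, strlen(path));
 *     if (pos < 0)
 *         pos = -pos - 1;    (first entry that sorts after "path")
 */
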
571 int remove_index_entry_at(struct index_state *istate, int pos)
572 {
573 struct cache_entry *ce = istate->cache[pos];
574
575 record_resolve_undo(istate, ce);
576 remove_name_hash(istate, ce);
577 save_or_free_index_entry(istate, ce);
578 istate->cache_changed |= CE_ENTRY_REMOVED;
579 istate->cache_nr--;
580 if (pos >= istate->cache_nr)
581 return 0;
582 MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1,
583 istate->cache_nr - pos);
584 return 1;
585 }
586
587 /*
588 * Remove all cache entries marked for removal, that is where
589 * CE_REMOVE is set in ce_flags. This is much more efficient than
590 * calling remove_index_entry_at() for each entry to be removed.
591 */
592 void remove_marked_cache_entries(struct index_state *istate, int invalidate)
593 {
594 struct cache_entry **ce_array = istate->cache;
595 unsigned int i, j;
596
597 for (i = j = 0; i < istate->cache_nr; i++) {
598 if (ce_array[i]->ce_flags & CE_REMOVE) {
599 if (invalidate) {
600 cache_tree_invalidate_path(istate,
601 ce_array[i]->name);
602 untracked_cache_remove_from_index(istate,
603 ce_array[i]->name);
604 }
605 remove_name_hash(istate, ce_array[i]);
606 save_or_free_index_entry(istate, ce_array[i]);
607 }
608 else
609 ce_array[j++] = ce_array[i];
610 }
611 if (j == istate->cache_nr)
612 return;
613 istate->cache_changed |= CE_ENTRY_REMOVED;
614 istate->cache_nr = j;
615 }
616
617 int remove_file_from_index(struct index_state *istate, const char *path)
618 {
619 int pos = index_name_pos(istate, path, strlen(path));
620 if (pos < 0)
621 pos = -pos-1;
622 cache_tree_invalidate_path(istate, path);
623 untracked_cache_remove_from_index(istate, path);
624 while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
625 remove_index_entry_at(istate, pos);
626 return 0;
627 }
628
629 static int compare_name(struct cache_entry *ce, const char *path, int namelen)
630 {
631 return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen);
632 }
633
634 static int index_name_pos_also_unmerged(struct index_state *istate,
635 const char *path, int namelen)
636 {
637 int pos = index_name_pos(istate, path, namelen);
638 struct cache_entry *ce;
639
640 if (pos >= 0)
641 return pos;
642
643 /* maybe unmerged? */
644 pos = -1 - pos;
645 if (pos >= istate->cache_nr ||
646 compare_name((ce = istate->cache[pos]), path, namelen))
647 return -1;
648
649 /* order of preference: stage 2, 1, 3 */
650 if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr &&
651 ce_stage((ce = istate->cache[pos + 1])) == 2 &&
652 !compare_name(ce, path, namelen))
653 pos++;
654 return pos;
655 }
656
657 static int different_name(struct cache_entry *ce, struct cache_entry *alias)
658 {
659 int len = ce_namelen(ce);
660 return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len);
661 }
662
663 /*
664 * If we add a filename that aliases in the cache, we will use the
665 * name that we already have - but we don't want to update the same
666 * alias twice, because that implies that there were actually two
667 * different files with aliasing names!
668 *
669 * So we use the CE_ADDED flag to verify that the alias was an old
670 * one before we accept it as a replacement.
671 */
672 static struct cache_entry *create_alias_ce(struct index_state *istate,
673 struct cache_entry *ce,
674 struct cache_entry *alias)
675 {
676 int len;
677 struct cache_entry *new_entry;
678
679 if (alias->ce_flags & CE_ADDED)
680 die(_("will not add file alias '%s' ('%s' already exists in index)"),
681 ce->name, alias->name);
682
683 /* Ok, create the new entry using the name of the existing alias */
684 len = ce_namelen(alias);
685 new_entry = make_empty_cache_entry(istate, len);
686 memcpy(new_entry->name, alias->name, len);
687 copy_cache_entry(new_entry, ce);
688 save_or_free_index_entry(istate, ce);
689 return new_entry;
690 }
691
692 void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
693 {
694 struct object_id oid;
695 if (write_object_file("", 0, blob_type, &oid))
696 die(_("cannot create an empty blob in the object database"));
697 oidcpy(&ce->oid, &oid);
698 }
699
700 int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
701 {
702 int namelen, was_same;
703 mode_t st_mode = st->st_mode;
704 struct cache_entry *ce, *alias = NULL;
705 unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
706 int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
707 int pretend = flags & ADD_CACHE_PRETEND;
708 int intent_only = flags & ADD_CACHE_INTENT;
709 int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
710 (intent_only ? ADD_CACHE_NEW_ONLY : 0));
711 int hash_flags = HASH_WRITE_OBJECT;
712 struct object_id oid;
713
714 if (flags & ADD_CACHE_RENORMALIZE)
715 hash_flags |= HASH_RENORMALIZE;
716
717 if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
718 return error(_("%s: can only add regular files, symbolic links or git-directories"), path);
719
720 namelen = strlen(path);
721 if (S_ISDIR(st_mode)) {
722 if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
723 return error(_("'%s' does not have a commit checked out"), path);
724 while (namelen && path[namelen-1] == '/')
725 namelen--;
726 }
727 ce = make_empty_cache_entry(istate, namelen);
728 memcpy(ce->name, path, namelen);
729 ce->ce_namelen = namelen;
730 if (!intent_only)
731 fill_stat_cache_info(istate, ce, st);
732 else
733 ce->ce_flags |= CE_INTENT_TO_ADD;
734
735
736 if (trust_executable_bit && has_symlinks) {
737 ce->ce_mode = create_ce_mode(st_mode);
738 } else {
739 /* If there is an existing entry, pick the mode bits and type
740 * from it; otherwise assume a non-executable regular file.
741 */
742 struct cache_entry *ent;
743 int pos = index_name_pos_also_unmerged(istate, path, namelen);
744
745 ent = (0 <= pos) ? istate->cache[pos] : NULL;
746 ce->ce_mode = ce_mode_from_stat(ent, st_mode);
747 }
748
749 /* When core.ignorecase=true, determine if a directory of the same name but differing
750 * case already exists within the Git repository. If it does, ensure the directory
751 * case of the file being added to the repository matches (is folded into) the existing
752 * entry's directory case.
753 */
754 if (ignore_case) {
755 adjust_dirname_case(istate, ce->name);
756 }
757 if (!(flags & ADD_CACHE_RENORMALIZE)) {
758 alias = index_file_exists(istate, ce->name,
759 ce_namelen(ce), ignore_case);
760 if (alias &&
761 !ce_stage(alias) &&
762 !ie_match_stat(istate, alias, st, ce_option)) {
763 /* Nothing changed, really */
764 if (!S_ISGITLINK(alias->ce_mode))
765 ce_mark_uptodate(alias);
766 alias->ce_flags |= CE_ADDED;
767
768 discard_cache_entry(ce);
769 return 0;
770 }
771 }
772 if (!intent_only) {
773 if (index_path(istate, &ce->oid, path, st, hash_flags)) {
774 discard_cache_entry(ce);
775 return error(_("unable to index file '%s'"), path);
776 }
777 } else
778 set_object_name_for_intent_to_add_entry(ce);
779
780 if (ignore_case && alias && different_name(ce, alias))
781 ce = create_alias_ce(istate, ce, alias);
782 ce->ce_flags |= CE_ADDED;
783
784 /* It was suspected to be racily clean, but it turns out to be Ok */
785 was_same = (alias &&
786 !ce_stage(alias) &&
787 oideq(&alias->oid, &ce->oid) &&
788 ce->ce_mode == alias->ce_mode);
789
790 if (pretend)
791 discard_cache_entry(ce);
792 else if (add_index_entry(istate, ce, add_option)) {
793 discard_cache_entry(ce);
794 return error(_("unable to add '%s' to index"), path);
795 }
796 if (verbose && !was_same)
797 printf("add '%s'\n", path);
798 return 0;
799 }
800
801 int add_file_to_index(struct index_state *istate, const char *path, int flags)
802 {
803 struct stat st;
804 if (lstat(path, &st))
805 die_errno(_("unable to stat '%s'"), path);
806 return add_to_index(istate, path, &st, flags);
807 }
808
809 struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
810 {
811 return mem_pool__ce_calloc(find_mem_pool(istate), len);
812 }
813
814 struct cache_entry *make_empty_transient_cache_entry(size_t len)
815 {
816 return xcalloc(1, cache_entry_size(len));
817 }
818
819 struct cache_entry *make_cache_entry(struct index_state *istate,
820 unsigned int mode,
821 const struct object_id *oid,
822 const char *path,
823 int stage,
824 unsigned int refresh_options)
825 {
826 struct cache_entry *ce, *ret;
827 int len;
828
829 if (!verify_path(path, mode)) {
830 error(_("invalid path '%s'"), path);
831 return NULL;
832 }
833
834 len = strlen(path);
835 ce = make_empty_cache_entry(istate, len);
836
837 oidcpy(&ce->oid, oid);
838 memcpy(ce->name, path, len);
839 ce->ce_flags = create_ce_flags(stage);
840 ce->ce_namelen = len;
841 ce->ce_mode = create_ce_mode(mode);
842
843 ret = refresh_cache_entry(istate, ce, refresh_options);
844 if (ret != ce)
845 discard_cache_entry(ce);
846 return ret;
847 }
848
849 struct cache_entry *make_transient_cache_entry(unsigned int mode, const struct object_id *oid,
850 const char *path, int stage)
851 {
852 struct cache_entry *ce;
853 int len;
854
855 if (!verify_path(path, mode)) {
856 error(_("invalid path '%s'"), path);
857 return NULL;
858 }
859
860 len = strlen(path);
861 ce = make_empty_transient_cache_entry(len);
862
863 oidcpy(&ce->oid, oid);
864 memcpy(ce->name, path, len);
865 ce->ce_flags = create_ce_flags(stage);
866 ce->ce_namelen = len;
867 ce->ce_mode = create_ce_mode(mode);
868
869 return ce;
870 }
871
872 /*
873 * Chmod an index entry with either +x or -x.
874 *
875 * Returns -1 if the chmod for the particular cache entry failed (if it's
876 * not a regular file), -2 if an invalid flip argument is passed in, 0
877 * otherwise.
878 */
879 int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
880 char flip)
881 {
882 if (!S_ISREG(ce->ce_mode))
883 return -1;
884 switch (flip) {
885 case '+':
886 ce->ce_mode |= 0111;
887 break;
888 case '-':
889 ce->ce_mode &= ~0111;
890 break;
891 default:
892 return -2;
893 }
894 cache_tree_invalidate_path(istate, ce->name);
895 ce->ce_flags |= CE_UPDATE_IN_BASE;
896 mark_fsmonitor_invalid(istate, ce);
897 istate->cache_changed |= CE_ENTRY_CHANGED;
898
899 return 0;
900 }
901
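/*
 * A minimal usage sketch: flipping the executable bit of an entry, as
 * "git update-index --chmod=+x" does, boils down to
 *
 *     if (chmod_index_entry(istate, ce, '+') < 0)
 *         ... the entry is not a regular file, or the flip was invalid ...
 */
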
902 int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
903 {
904 int len = ce_namelen(a);
905 return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
906 }
907
908 /*
909 * We fundamentally don't like some paths: we don't want
910 * dot or dot-dot anywhere, and for obvious reasons don't
911 * want to recurse into ".git" either.
912 *
913 * Also, we don't want double slashes or slashes at the
914 * end that can make pathnames ambiguous.
915 */
916 static int verify_dotfile(const char *rest, unsigned mode)
917 {
918 /*
919 * The first character was '.', but that
920 * has already been discarded; we now test
921 * the rest.
922 */
923
924 /* "." is not allowed */
925 if (*rest == '\0' || is_dir_sep(*rest))
926 return 0;
927
928 switch (*rest) {
929 /*
930 * ".git" followed by NUL or slash is bad. Note that we match
931 * case-insensitively here, even if ignore_case is not set.
932 * This outlaws ".GIT" everywhere out of an abundance of caution,
933 * since there's really no good reason to allow it.
934 *
935 * Once we've seen ".git", we can also find ".gitmodules", etc (also
936 * case-insensitively).
937 */
938 case 'g':
939 case 'G':
940 if (rest[1] != 'i' && rest[1] != 'I')
941 break;
942 if (rest[2] != 't' && rest[2] != 'T')
943 break;
944 if (rest[3] == '\0' || is_dir_sep(rest[3]))
945 return 0;
946 if (S_ISLNK(mode)) {
947 rest += 3;
948 if (skip_iprefix(rest, "modules", &rest) &&
949 (*rest == '\0' || is_dir_sep(*rest)))
950 return 0;
951 }
952 break;
953 case '.':
954 if (rest[1] == '\0' || is_dir_sep(rest[1]))
955 return 0;
956 }
957 return 1;
958 }
959
960 int verify_path(const char *path, unsigned mode)
961 {
962 char c;
963
964 if (has_dos_drive_prefix(path))
965 return 0;
966
967 if (!is_valid_path(path))
968 return 0;
969
970 goto inside;
971 for (;;) {
972 if (!c)
973 return 1;
974 if (is_dir_sep(c)) {
975 inside:
976 if (protect_hfs) {
977 if (is_hfs_dotgit(path))
978 return 0;
979 if (S_ISLNK(mode)) {
980 if (is_hfs_dotgitmodules(path))
981 return 0;
982 }
983 }
984 if (protect_ntfs) {
985 if (is_ntfs_dotgit(path))
986 return 0;
987 if (S_ISLNK(mode)) {
988 if (is_ntfs_dotgitmodules(path))
989 return 0;
990 }
991 }
992
993 c = *path++;
994 if ((c == '.' && !verify_dotfile(path, mode)) ||
995 is_dir_sep(c) || c == '\0')
996 return 0;
997 } else if (c == '\\' && protect_ntfs) {
998 if (is_ntfs_dotgit(path))
999 return 0;
1000 if (S_ISLNK(mode)) {
1001 if (is_ntfs_dotgitmodules(path))
1002 return 0;
1003 }
1004 }
1005
1006 c = *path++;
1007 }
1008 }
1009
1010 /*
1011 * Do we have another file whose leading path components are a
1012 * proper superset of the name we're trying to add?
1013 */
1014 static int has_file_name(struct index_state *istate,
1015 const struct cache_entry *ce, int pos, int ok_to_replace)
1016 {
1017 int retval = 0;
1018 int len = ce_namelen(ce);
1019 int stage = ce_stage(ce);
1020 const char *name = ce->name;
1021
1022 while (pos < istate->cache_nr) {
1023 struct cache_entry *p = istate->cache[pos++];
1024
1025 if (len >= ce_namelen(p))
1026 break;
1027 if (memcmp(name, p->name, len))
1028 break;
1029 if (ce_stage(p) != stage)
1030 continue;
1031 if (p->name[len] != '/')
1032 continue;
1033 if (p->ce_flags & CE_REMOVE)
1034 continue;
1035 retval = -1;
1036 if (!ok_to_replace)
1037 break;
1038 remove_index_entry_at(istate, --pos);
1039 }
1040 return retval;
1041 }
1042
1043
1044 /*
1045 * Like strcmp(), but also return the offset of the first change.
1046 * If strings are equal, return the length.
1047 */
1048 int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
1049 {
1050 size_t k;
1051
1052 if (!first_change)
1053 return strcmp(s1, s2);
1054
1055 for (k = 0; s1[k] == s2[k]; k++)
1056 if (s1[k] == '\0')
1057 break;
1058
1059 *first_change = k;
1060 return (unsigned char)s1[k] - (unsigned char)s2[k];
1061 }
1062
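/*
 * A small example of strcmp_offset():
 *
 *     size_t common;
 *     int cmp = strcmp_offset("dir/file_a", "dir/file_b", &common);
 *
 * leaves cmp < 0 and common == 9, the length of the shared prefix
 * "dir/file_".  has_dir_name() below relies on this offset to skip
 * directory-level comparisons that cannot possibly conflict.
 */
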
1063 /*
1064 * Do we have another file with a pathname that is a proper
1065 * subset of the name we're trying to add?
1066 *
1067 * That is, is there another file in the index with a path
1068 * that matches a sub-directory in the given entry?
1069 */
1070 static int has_dir_name(struct index_state *istate,
1071 const struct cache_entry *ce, int pos, int ok_to_replace)
1072 {
1073 int retval = 0;
1074 int stage = ce_stage(ce);
1075 const char *name = ce->name;
1076 const char *slash = name + ce_namelen(ce);
1077 size_t len_eq_last;
1078 int cmp_last = 0;
1079
1080 /*
1081 * We are frequently called during an iteration on a sorted
1082 * list of pathnames and while building a new index. Therefore,
1083 * there is a high probability that this entry will eventually
1084 * be appended to the index, rather than inserted in the middle.
1085 * If we can confirm that, we can avoid binary searches on the
1086 * components of the pathname.
1087 *
1088 * Compare the entry's full path with the last path in the index.
1089 */
1090 if (istate->cache_nr > 0) {
1091 cmp_last = strcmp_offset(name,
1092 istate->cache[istate->cache_nr - 1]->name,
1093 &len_eq_last);
1094 if (cmp_last > 0) {
1095 if (len_eq_last == 0) {
1096 /*
1097 * The entry sorts AFTER the last one in the
1098 * index and their paths have no common prefix,
1099 * so there cannot be a F/D conflict.
1100 */
1101 return retval;
1102 } else {
1103 /*
1104 * The entry sorts AFTER the last one in the
1105 * index, but has a common prefix. Fall through
1106 * to the loop below to dissect the entry's path
1107 * and see where the difference is.
1108 */
1109 }
1110 } else if (cmp_last == 0) {
1111 /*
1112 * The entry exactly matches the last one in the
1113 * index, but because of multiple stage and CE_REMOVE
1114 * items, we fall through and let the regular search
1115 * code handle it.
1116 */
1117 }
1118 }
1119
1120 for (;;) {
1121 size_t len;
1122
1123 for (;;) {
1124 if (*--slash == '/')
1125 break;
1126 if (slash <= ce->name)
1127 return retval;
1128 }
1129 len = slash - name;
1130
1131 if (cmp_last > 0) {
1132 /*
1133 * (len + 1) is a directory boundary (including
1134 * the trailing slash). And since the loop is
1135 * decrementing "slash", the first iteration is
1136 * the longest directory prefix; subsequent
1137 * iterations consider parent directories.
1138 */
1139
1140 if (len + 1 <= len_eq_last) {
1141 /*
1142 * The directory prefix (including the trailing
1143 * slash) also appears as a prefix in the last
1144 * entry, so the remainder cannot collide (because
1145 * strcmp said the whole path was greater).
1146 *
1147 * EQ: last: xxx/A
1148 * this: xxx/B
1149 *
1150 * LT: last: xxx/file_A
1151 * this: xxx/file_B
1152 */
1153 return retval;
1154 }
1155
1156 if (len > len_eq_last) {
1157 /*
1158 * This part of the directory prefix (excluding
1159 * the trailing slash) is longer than the known
1160 * equal portions, so this sub-directory cannot
1161 * collide with a file.
1162 *
1163 * GT: last: xxxA
1164 * this: xxxB/file
1165 */
1166 return retval;
1167 }
1168
1169 if (istate->cache_nr > 0 &&
1170 ce_namelen(istate->cache[istate->cache_nr - 1]) > len) {
1171 /*
1172 * The directory prefix lines up with part of
1173 * a longer file or directory name, but sorts
1174 * after it, so this sub-directory cannot
1175 * collide with a file.
1176 *
1177 * last: xxx/yy-file (because '-' sorts before '/')
1178 * this: xxx/yy/abc
1179 */
1180 return retval;
1181 }
1182
1183 /*
1184 * This is a possible collision. Fall through and
1185 * let the regular search code handle it.
1186 *
1187 * last: xxx
1188 * this: xxx/file
1189 */
1190 }
1191
1192 pos = index_name_stage_pos(istate, name, len, stage);
1193 if (pos >= 0) {
1194 /*
1195 * Found one, but not so fast. This could
1196 * be a marker that says "I was here, but
1197 * I am being removed". Such an entry is
1198 * not a part of the resulting tree, and
1199 * it is Ok to have a directory at the same
1200 * path.
1201 */
1202 if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
1203 retval = -1;
1204 if (!ok_to_replace)
1205 break;
1206 remove_index_entry_at(istate, pos);
1207 continue;
1208 }
1209 }
1210 else
1211 pos = -pos-1;
1212
1213 /*
1214 * Trivial optimization: if we find an entry that
1215 * already matches the sub-directory, then we know
1216 * we're ok, and we can exit.
1217 */
1218 while (pos < istate->cache_nr) {
1219 struct cache_entry *p = istate->cache[pos];
1220 if ((ce_namelen(p) <= len) ||
1221 (p->name[len] != '/') ||
1222 memcmp(p->name, name, len))
1223 break; /* not our subdirectory */
1224 if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
1225 /*
1226 * p is at the same stage as our entry, and
1227 * is a subdirectory of what we are looking
1228 * at, so we cannot have conflicts at our
1229 * level or anything shorter.
1230 */
1231 return retval;
1232 pos++;
1233 }
1234 }
1235 return retval;
1236 }
1237
1238 /* We may be in a situation where we already have path/file and path
1239 * is being added, or we already have path and path/file is being
1240 * added. Either one would result in a nonsense tree that has path
1241 * twice when git-write-tree tries to write it out. Prevent it.
1242 *
1243 * If ok-to-replace is specified, we remove the conflicting entries
1244 * from the cache so the caller should recompute the insert position.
1245 * When this happens, we return non-zero.
1246 */
1247 static int check_file_directory_conflict(struct index_state *istate,
1248 const struct cache_entry *ce,
1249 int pos, int ok_to_replace)
1250 {
1251 int retval;
1252
1253 /*
1254 * When ce is an "I am going away" entry, we allow it to be added
1255 */
1256 if (ce->ce_flags & CE_REMOVE)
1257 return 0;
1258
1259 /*
1260 * We check if the path is a sub-path of a subsequent pathname
1261 * first, since removing those will not change the position
1262 * in the array.
1263 */
1264 retval = has_file_name(istate, ce, pos, ok_to_replace);
1265
1266 /*
1267 * Then check if the path might have a clashing sub-directory
1268 * before it.
1269 */
1270 return retval + has_dir_name(istate, ce, pos, ok_to_replace);
1271 }
1272
1273 static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
1274 {
1275 int pos;
1276 int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
1277 int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
1278 int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
1279 int new_only = option & ADD_CACHE_NEW_ONLY;
1280
1281 if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
1282 cache_tree_invalidate_path(istate, ce->name);
1283
1284 /*
1285 * If this entry's path sorts after the last entry in the index,
1286 * we can avoid searching for it.
1287 */
1288 if (istate->cache_nr > 0 &&
1289 strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
1290 pos = -istate->cache_nr - 1;
1291 else
1292 pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
1293
1294 /* existing match? Just replace it. */
1295 if (pos >= 0) {
1296 if (!new_only)
1297 replace_index_entry(istate, pos, ce);
1298 return 0;
1299 }
1300 pos = -pos-1;
1301
1302 if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
1303 untracked_cache_add_to_index(istate, ce->name);
1304
1305 /*
1306 * Inserting a merged entry ("stage 0") into the index
1307 * will always replace all non-merged entries.
1308 */
1309 if (pos < istate->cache_nr && ce_stage(ce) == 0) {
1310 while (ce_same_name(istate->cache[pos], ce)) {
1311 ok_to_add = 1;
1312 if (!remove_index_entry_at(istate, pos))
1313 break;
1314 }
1315 }
1316
1317 if (!ok_to_add)
1318 return -1;
1319 if (!verify_path(ce->name, ce->ce_mode))
1320 return error(_("invalid path '%s'"), ce->name);
1321
1322 if (!skip_df_check &&
1323 check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
1324 if (!ok_to_replace)
1325 return error(_("'%s' appears as both a file and as a directory"),
1326 ce->name);
1327 pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
1328 pos = -pos-1;
1329 }
1330 return pos + 1;
1331 }
1332
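/*
 * A short note on the return convention of add_index_entry_with_check()
 * above: 0 means an existing entry with the same name and stage was
 * replaced in place (nothing left to insert), a negative value is an
 * error, and a positive value is the insertion position plus one, which
 * add_index_entry() below decodes as
 *
 *     pos = ret - 1;
 */
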
1333 int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
1334 {
1335 int pos;
1336
1337 if (option & ADD_CACHE_JUST_APPEND)
1338 pos = istate->cache_nr;
1339 else {
1340 int ret;
1341 ret = add_index_entry_with_check(istate, ce, option);
1342 if (ret <= 0)
1343 return ret;
1344 pos = ret - 1;
1345 }
1346
1347 /* Make sure the array is big enough .. */
1348 ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc);
1349
1350 /* Add it in.. */
1351 istate->cache_nr++;
1352 if (istate->cache_nr > pos + 1)
1353 MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
1354 istate->cache_nr - pos - 1);
1355 set_index_entry(istate, pos, ce);
1356 istate->cache_changed |= CE_ENTRY_ADDED;
1357 return 0;
1358 }
1359
1360 /*
1361 * "refresh" does not calculate a new sha1 file or bring the
1362 * cache up-to-date for mode/content changes. But what it
1363 * _does_ do is to "re-match" the stat information of a file
1364 * with the cache, so that you can refresh the cache for a
1365 * file that hasn't been changed but where the stat entry is
1366 * out of date.
1367 *
1368 * For example, you'd want to do this after doing a "git-read-tree",
1369 * to link up the stat cache details with the proper files.
1370 */
1371 static struct cache_entry *refresh_cache_ent(struct index_state *istate,
1372 struct cache_entry *ce,
1373 unsigned int options, int *err,
1374 int *changed_ret)
1375 {
1376 struct stat st;
1377 struct cache_entry *updated;
1378 int changed;
1379 int refresh = options & CE_MATCH_REFRESH;
1380 int ignore_valid = options & CE_MATCH_IGNORE_VALID;
1381 int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
1382 int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
1383 int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
1384
1385 if (!refresh || ce_uptodate(ce))
1386 return ce;
1387
1388 if (!ignore_fsmonitor)
1389 refresh_fsmonitor(istate);
1390 /*
1391 * CE_VALID or CE_SKIP_WORKTREE means the user promised us
1392 * that the change to the work tree does not matter and told
1393 * us not to worry.
1394 */
1395 if (!ignore_skip_worktree && ce_skip_worktree(ce)) {
1396 ce_mark_uptodate(ce);
1397 return ce;
1398 }
1399 if (!ignore_valid && (ce->ce_flags & CE_VALID)) {
1400 ce_mark_uptodate(ce);
1401 return ce;
1402 }
1403 if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
1404 ce_mark_uptodate(ce);
1405 return ce;
1406 }
1407
1408 if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
1409 if (ignore_missing)
1410 return ce;
1411 if (err)
1412 *err = ENOENT;
1413 return NULL;
1414 }
1415
1416 if (lstat(ce->name, &st) < 0) {
1417 if (ignore_missing && errno == ENOENT)
1418 return ce;
1419 if (err)
1420 *err = errno;
1421 return NULL;
1422 }
1423
1424 changed = ie_match_stat(istate, ce, &st, options);
1425 if (changed_ret)
1426 *changed_ret = changed;
1427 if (!changed) {
1428 /*
1429 * The path is unchanged. If we were told to ignore
1430 * valid bit, then we did the actual stat check and
1431 * found that the entry is unmodified. If the entry
1432 * is not marked VALID, this is the place to mark it
1433 * valid again, under "assume unchanged" mode.
1434 */
1435 if (ignore_valid && assume_unchanged &&
1436 !(ce->ce_flags & CE_VALID))
1437 ; /* mark this one VALID again */
1438 else {
1439 /*
1440 * We do not mark the index itself "modified"
1441 * because CE_UPTODATE flag is in-core only;
1442 * we are not going to write this change out.
1443 */
1444 if (!S_ISGITLINK(ce->ce_mode)) {
1445 ce_mark_uptodate(ce);
1446 mark_fsmonitor_valid(istate, ce);
1447 }
1448 return ce;
1449 }
1450 }
1451
1452 if (ie_modified(istate, ce, &st, options)) {
1453 if (err)
1454 *err = EINVAL;
1455 return NULL;
1456 }
1457
1458 updated = make_empty_cache_entry(istate, ce_namelen(ce));
1459 copy_cache_entry(updated, ce);
1460 memcpy(updated->name, ce->name, ce->ce_namelen + 1);
1461 fill_stat_cache_info(istate, updated, &st);
1462 /*
1463 * If ignore_valid is not set, we should leave CE_VALID bit
1464 * alone. Otherwise, paths marked with --no-assume-unchanged
1465 * (i.e. things to be edited) will reacquire CE_VALID bit
1466 * automatically, which is not really what we want.
1467 */
1468 if (!ignore_valid && assume_unchanged &&
1469 !(ce->ce_flags & CE_VALID))
1470 updated->ce_flags &= ~CE_VALID;
1471
1472 /* istate->cache_changed is updated in the caller */
1473 return updated;
1474 }
1475
1476 static void show_file(const char * fmt, const char * name, int in_porcelain,
1477 int * first, const char *header_msg)
1478 {
1479 if (in_porcelain && *first && header_msg) {
1480 printf("%s\n", header_msg);
1481 *first = 0;
1482 }
1483 printf(fmt, name);
1484 }
1485
1486 int refresh_index(struct index_state *istate, unsigned int flags,
1487 const struct pathspec *pathspec,
1488 char *seen, const char *header_msg)
1489 {
1490 int i;
1491 int has_errors = 0;
1492 int really = (flags & REFRESH_REALLY) != 0;
1493 int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
1494 int quiet = (flags & REFRESH_QUIET) != 0;
1495 int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;
1496 int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0;
1497 int first = 1;
1498 int in_porcelain = (flags & REFRESH_IN_PORCELAIN);
1499 unsigned int options = (CE_MATCH_REFRESH |
1500 (really ? CE_MATCH_IGNORE_VALID : 0) |
1501 (not_new ? CE_MATCH_IGNORE_MISSING : 0));
1502 const char *modified_fmt;
1503 const char *deleted_fmt;
1504 const char *typechange_fmt;
1505 const char *added_fmt;
1506 const char *unmerged_fmt;
1507 struct progress *progress = NULL;
1508
1509 if (flags & REFRESH_PROGRESS && isatty(2))
1510 progress = start_delayed_progress(_("Refresh index"),
1511 istate->cache_nr);
1512
1513 trace_performance_enter();
1514 modified_fmt = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
1515 deleted_fmt = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
1516 typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
1517 added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
1518 unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
1519 /*
1520 * Use the multi-threaded preload_index() to refresh most of the
1521 * cache entries quickly; then, in the single-threaded loop below,
1522 * we only have to handle the special cases that are left.
1523 */
1524 preload_index(istate, pathspec, 0);
1525 for (i = 0; i < istate->cache_nr; i++) {
1526 struct cache_entry *ce, *new_entry;
1527 int cache_errno = 0;
1528 int changed = 0;
1529 int filtered = 0;
1530
1531 ce = istate->cache[i];
1532 if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
1533 continue;
1534
1535 if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
1536 filtered = 1;
1537
1538 if (ce_stage(ce)) {
1539 while ((i < istate->cache_nr) &&
1540 ! strcmp(istate->cache[i]->name, ce->name))
1541 i++;
1542 i--;
1543 if (allow_unmerged)
1544 continue;
1545 if (!filtered)
1546 show_file(unmerged_fmt, ce->name, in_porcelain,
1547 &first, header_msg);
1548 has_errors = 1;
1549 continue;
1550 }
1551
1552 if (filtered)
1553 continue;
1554
1555 new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
1556 if (new_entry == ce)
1557 continue;
1558 if (progress)
1559 display_progress(progress, i);
1560 if (!new_entry) {
1561 const char *fmt;
1562
1563 if (really && cache_errno == EINVAL) {
1564 /* If we are doing --really-refresh that
1565 * means the index is not valid anymore.
1566 */
1567 ce->ce_flags &= ~CE_VALID;
1568 ce->ce_flags |= CE_UPDATE_IN_BASE;
1569 mark_fsmonitor_invalid(istate, ce);
1570 istate->cache_changed |= CE_ENTRY_CHANGED;
1571 }
1572 if (quiet)
1573 continue;
1574
1575 if (cache_errno == ENOENT)
1576 fmt = deleted_fmt;
1577 else if (ce_intent_to_add(ce))
1578 fmt = added_fmt; /* must be before other checks */
1579 else if (changed & TYPE_CHANGED)
1580 fmt = typechange_fmt;
1581 else
1582 fmt = modified_fmt;
1583 show_file(fmt,
1584 ce->name, in_porcelain, &first, header_msg);
1585 has_errors = 1;
1586 continue;
1587 }
1588
1589 replace_index_entry(istate, i, new_entry);
1590 }
1591 if (progress) {
1592 display_progress(progress, istate->cache_nr);
1593 stop_progress(&progress);
1594 }
1595 trace_performance_leave("refresh index");
1596 return has_errors;
1597 }
1598
1599 struct cache_entry *refresh_cache_entry(struct index_state *istate,
1600 struct cache_entry *ce,
1601 unsigned int options)
1602 {
1603 return refresh_cache_ent(istate, ce, options, NULL, NULL);
1604 }
1605
1606
1607 /*****************************************************************
1608 * Index File I/O
1609 *****************************************************************/
1610
1611 #define INDEX_FORMAT_DEFAULT 3
1612
1613 static unsigned int get_index_format_default(void)
1614 {
1615 char *envversion = getenv("GIT_INDEX_VERSION");
1616 char *endp;
1617 int value;
1618 unsigned int version = INDEX_FORMAT_DEFAULT;
1619
1620 if (!envversion) {
1621 if (!git_config_get_int("index.version", &value))
1622 version = value;
1623 if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
1624 warning(_("index.version set, but the value is invalid.\n"
1625 "Using version %i"), INDEX_FORMAT_DEFAULT);
1626 return INDEX_FORMAT_DEFAULT;
1627 }
1628 return version;
1629 }
1630
1631 version = strtoul(envversion, &endp, 10);
1632 if (*endp ||
1633 version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
1634 warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
1635 "Using version %i"), INDEX_FORMAT_DEFAULT);
1636 version = INDEX_FORMAT_DEFAULT;
1637 }
1638 return version;
1639 }
1640
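/*
 * A usage note for the version selection above: a freshly written index
 * can be nudged to format v4 with
 *
 *     git config index.version 4
 *
 * while the GIT_INDEX_VERSION environment variable, when set, takes
 * precedence over the configuration.  Values outside
 * INDEX_FORMAT_LB..INDEX_FORMAT_UB fall back to INDEX_FORMAT_DEFAULT
 * with a warning.
 */
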
1641 /*
1642 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
1643 * Again - this is just a (very strong in practice) heuristic that
1644 * the inode hasn't changed.
1645 *
1646 * We save the fields in big-endian order to allow using the
1647 * index file over NFS transparently.
1648 */
1649 struct ondisk_cache_entry {
1650 struct cache_time ctime;
1651 struct cache_time mtime;
1652 uint32_t dev;
1653 uint32_t ino;
1654 uint32_t mode;
1655 uint32_t uid;
1656 uint32_t gid;
1657 uint32_t size;
1658 /*
1659 * unsigned char hash[hashsz];
1660 * uint16_t flags;
1661 * if (flags & CE_EXTENDED)
1662 * uint16_t flags2;
1663 */
1664 unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
1665 char name[FLEX_ARRAY];
1666 };
1667
1668 /* These are only used for v3 or lower */
1669 #define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
1670 #define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
1671 #define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
1672 #define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
1673 ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
1674 #define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
1675 #define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
1676
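/*
 * A rough worked example, assuming the SHA-1 hash (rawsz == 20) and a
 * non-extended v2/v3 entry for the path "file.c" (6 bytes):
 * offsetof(struct ondisk_cache_entry, data) is 40,
 * ondisk_data_size(0, 6) is 20 + 2 + 6 == 28, and
 * ondisk_cache_entry_size(28) pads 40 + 28 == 68 up to 72 bytes on disk
 * (the macro always leaves at least one trailing NUL), so the final 4
 * bytes are NUL padding that also terminates the name.
 */
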
1677 /* Allow fsck to force verification of the index checksum. */
1678 int verify_index_checksum;
1679
1680 /* Allow fsck to force verification of the cache entry order. */
1681 int verify_ce_order;
1682
1683 static int verify_hdr(const struct cache_header *hdr, unsigned long size)
1684 {
1685 git_hash_ctx c;
1686 unsigned char hash[GIT_MAX_RAWSZ];
1687 int hdr_version;
1688
1689 if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
1690 return error(_("bad signature 0x%08x"), hdr->hdr_signature);
1691 hdr_version = ntohl(hdr->hdr_version);
1692 if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
1693 return error(_("bad index version %d"), hdr_version);
1694
1695 if (!verify_index_checksum)
1696 return 0;
1697
1698 the_hash_algo->init_fn(&c);
1699 the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
1700 the_hash_algo->final_fn(hash, &c);
1701 if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
1702 return error(_("bad index file sha1 signature"));
1703 return 0;
1704 }
1705
1706 static int read_index_extension(struct index_state *istate,
1707 const char *ext, const char *data, unsigned long sz)
1708 {
1709 switch (CACHE_EXT(ext)) {
1710 case CACHE_EXT_TREE:
1711 istate->cache_tree = cache_tree_read(data, sz);
1712 break;
1713 case CACHE_EXT_RESOLVE_UNDO:
1714 istate->resolve_undo = resolve_undo_read(data, sz);
1715 break;
1716 case CACHE_EXT_LINK:
1717 if (read_link_extension(istate, data, sz))
1718 return -1;
1719 break;
1720 case CACHE_EXT_UNTRACKED:
1721 istate->untracked = read_untracked_extension(data, sz);
1722 break;
1723 case CACHE_EXT_FSMONITOR:
1724 read_fsmonitor_extension(istate, data, sz);
1725 break;
1726 case CACHE_EXT_ENDOFINDEXENTRIES:
1727 case CACHE_EXT_INDEXENTRYOFFSETTABLE:
1728 /* already handled in do_read_index() */
1729 break;
1730 default:
1731 if (*ext < 'A' || 'Z' < *ext)
1732 return error(_("index uses %.4s extension, which we do not understand"),
1733 ext);
1734 fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
1735 break;
1736 }
1737 return 0;
1738 }
1739
1740 static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
1741 unsigned int version,
1742 struct ondisk_cache_entry *ondisk,
1743 unsigned long *ent_size,
1744 const struct cache_entry *previous_ce)
1745 {
1746 struct cache_entry *ce;
1747 size_t len;
1748 const char *name;
1749 const unsigned hashsz = the_hash_algo->rawsz;
1750 const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
1751 unsigned int flags;
1752 size_t copy_len = 0;
1753 /*
1754 * Adjacent cache entries tend to share the leading paths, so it makes
1755 * sense to only store the differences in later entries. In the v4
1756 * on-disk format of the index, each on-disk cache entry stores the
1757 * number of bytes to be stripped from the end of the previous name,
1758 * and the bytes to append to the result, to come up with its name.
1759 */
1760 int expand_name_field = version == 4;
1761
1762 /* On-disk flags are just 16 bits */
1763 flags = get_be16(flagsp);
1764 len = flags & CE_NAMEMASK;
1765
1766 if (flags & CE_EXTENDED) {
1767 int extended_flags;
1768 extended_flags = get_be16(flagsp + 1) << 16;
1769 /* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
1770 if (extended_flags & ~CE_EXTENDED_FLAGS)
1771 die(_("unknown index entry format 0x%08x"), extended_flags);
1772 flags |= extended_flags;
1773 name = (const char *)(flagsp + 2);
1774 }
1775 else
1776 name = (const char *)(flagsp + 1);
1777
1778 if (expand_name_field) {
1779 const unsigned char *cp = (const unsigned char *)name;
1780 size_t strip_len, previous_len;
1781
1782 /* If we're at the beginning of a block, ignore the previous name */
1783 strip_len = decode_varint(&cp);
1784 if (previous_ce) {
1785 previous_len = previous_ce->ce_namelen;
1786 if (previous_len < strip_len)
1787 die(_("malformed name field in the index, near path '%s'"),
1788 previous_ce->name);
1789 copy_len = previous_len - strip_len;
1790 }
1791 name = (const char *)cp;
1792 }
1793
1794 if (len == CE_NAMEMASK) {
1795 len = strlen(name);
1796 if (expand_name_field)
1797 len += copy_len;
1798 }
1799
1800 ce = mem_pool__ce_alloc(ce_mem_pool, len);
1801
1802 ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
1803 ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
1804 ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
1805 ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
1806 ce->ce_stat_data.sd_dev = get_be32(&ondisk->dev);
1807 ce->ce_stat_data.sd_ino = get_be32(&ondisk->ino);
1808 ce->ce_mode = get_be32(&ondisk->mode);
1809 ce->ce_stat_data.sd_uid = get_be32(&ondisk->uid);
1810 ce->ce_stat_data.sd_gid = get_be32(&ondisk->gid);
1811 ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
1812 ce->ce_flags = flags & ~CE_NAMEMASK;
1813 ce->ce_namelen = len;
1814 ce->index = 0;
1815 hashcpy(ce->oid.hash, ondisk->data);
1816 memcpy(ce->name, name, len);
1817 ce->name[len] = '\0';
1818
1819 if (expand_name_field) {
1820 if (copy_len)
1821 memcpy(ce->name, previous_ce->name, copy_len);
1822 memcpy(ce->name + copy_len, name, len + 1 - copy_len);
1823 *ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
1824 } else {
1825 memcpy(ce->name, name, len + 1);
1826 *ent_size = ondisk_ce_size(ce);
1827 }
1828 return ce;
1829 }
1830
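/*
 * To illustrate the v4 name compression handled above: if the previous
 * entry's name is "dir/sub/file_a" and the next path is
 * "dir/sub/file_b", the on-disk entry stores the varint 1 ("strip one
 * byte from the end of the previous name") followed by the bytes "b"
 * plus a terminating NUL, and create_from_disk() rebuilds the full name
 * by keeping the first 13 bytes of the previous name and appending "b".
 */
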
1831 static void check_ce_order(struct index_state *istate)
1832 {
1833 unsigned int i;
1834
1835 if (!verify_ce_order)
1836 return;
1837
1838 for (i = 1; i < istate->cache_nr; i++) {
1839 struct cache_entry *ce = istate->cache[i - 1];
1840 struct cache_entry *next_ce = istate->cache[i];
1841 int name_compare = strcmp(ce->name, next_ce->name);
1842
1843 if (0 < name_compare)
1844 die(_("unordered stage entries in index"));
1845 if (!name_compare) {
1846 if (!ce_stage(ce))
1847 die(_("multiple stage entries for merged file '%s'"),
1848 ce->name);
1849 if (ce_stage(ce) > ce_stage(next_ce))
1850 die(_("unordered stage entries for '%s'"),
1851 ce->name);
1852 }
1853 }
1854 }
1855
1856 static void tweak_untracked_cache(struct index_state *istate)
1857 {
1858 switch (git_config_get_untracked_cache()) {
1859 case -1: /* keep: do nothing */
1860 break;
1861 case 0: /* false */
1862 remove_untracked_cache(istate);
1863 break;
1864 case 1: /* true */
1865 add_untracked_cache(istate);
1866 break;
1867 default: /* unknown value: do nothing */
1868 break;
1869 }
1870 }
1871
1872 static void tweak_split_index(struct index_state *istate)
1873 {
1874 switch (git_config_get_split_index()) {
1875 case -1: /* unset: do nothing */
1876 break;
1877 case 0: /* false */
1878 remove_split_index(istate);
1879 break;
1880 case 1: /* true */
1881 add_split_index(istate);
1882 break;
1883 default: /* unknown value: do nothing */
1884 break;
1885 }
1886 }
1887
1888 static void post_read_index_from(struct index_state *istate)
1889 {
1890 check_ce_order(istate);
1891 tweak_untracked_cache(istate);
1892 tweak_split_index(istate);
1893 tweak_fsmonitor(istate);
1894 }
1895
1896 static size_t estimate_cache_size_from_compressed(unsigned int entries)
1897 {
1898 return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
1899 }
1900
1901 static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
1902 {
1903 long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);
1904
1905 /*
1906 * Account for potential alignment differences.
1907 */
1908 per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry));
1909 return ondisk_size + entries * per_entry;
1910 }
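/*
 * For illustration: for a hypothetical 100,000-entry V2/V3 index whose
 * mmap'd size is 12 MB, estimate_cache_size() returns 12 MB plus
 * 100,000 times the per-entry difference between the in-memory and
 * on-disk entry sizes (padded for alignment); the cache-entry mem_pool
 * is primed with that estimate before the entries are parsed.
 */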
1911
1912 struct index_entry_offset
1913 {
1914 /* starting byte offset into index file, count of index entries in this block */
1915 int offset, nr;
1916 };
1917
1918 struct index_entry_offset_table
1919 {
1920 int nr;
1921 struct index_entry_offset entries[FLEX_ARRAY];
1922 };
1923
1924 static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
1925 static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);
1926
1927 static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
1928 static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);
1929
1930 struct load_index_extensions
1931 {
1932 pthread_t pthread;
1933 struct index_state *istate;
1934 const char *mmap;
1935 size_t mmap_size;
1936 unsigned long src_offset;
1937 };
1938
1939 static void *load_index_extensions(void *_data)
1940 {
1941 struct load_index_extensions *p = _data;
1942 unsigned long src_offset = p->src_offset;
1943
1944 while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {
1945 /* After an array of active_nr index entries,
1946 * there can be an arbitrary number of extended
1947 * sections, each of which is prefixed with
1948 * an extension name (4 bytes) and the section length
1949 * in 4-byte network byte order.
1950 */
1951 uint32_t extsize = get_be32(p->mmap + src_offset + 4);
1952 if (read_index_extension(p->istate,
1953 p->mmap + src_offset,
1954 p->mmap + src_offset + 8,
1955 extsize) < 0) {
1956 munmap((void *)p->mmap, p->mmap_size);
1957 die(_("index file corrupt"));
1958 }
1959 src_offset += 8;
1960 src_offset += extsize;
1961 }
1962
1963 return NULL;
1964 }
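/*
 * As a concrete illustration of the loop above, a hypothetical pair of
 * extension records could look like this on disk (integers in network
 * byte order):
 *
 *   "TREE" <32-bit size N> <N bytes of cache-tree data>
 *   "REUC" <32-bit size M> <M bytes of resolve-undo data>
 *
 * Each iteration reads the 4-byte size at src_offset + 4, hands the
 * payload to read_index_extension(), and advances src_offset by
 * 8 + extsize to the next record.
 */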
1965
1966 /*
1967 * A helper function that will load the specified range of cache entries
1968 * from the memory mapped file and add them to the given index.
1969 */
1970 static unsigned long load_cache_entry_block(struct index_state *istate,
1971 struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,
1972 unsigned long start_offset, const struct cache_entry *previous_ce)
1973 {
1974 int i;
1975 unsigned long src_offset = start_offset;
1976
1977 for (i = offset; i < offset + nr; i++) {
1978 struct ondisk_cache_entry *disk_ce;
1979 struct cache_entry *ce;
1980 unsigned long consumed;
1981
1982 disk_ce = (struct ondisk_cache_entry *)(mmap + src_offset);
1983 ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce);
1984 set_index_entry(istate, i, ce);
1985
1986 src_offset += consumed;
1987 previous_ce = ce;
1988 }
1989 return src_offset - start_offset;
1990 }
1991
1992 static unsigned long load_all_cache_entries(struct index_state *istate,
1993 const char *mmap, size_t mmap_size, unsigned long src_offset)
1994 {
1995 unsigned long consumed;
1996
1997 if (istate->version == 4) {
1998 mem_pool_init(&istate->ce_mem_pool,
1999 estimate_cache_size_from_compressed(istate->cache_nr));
2000 } else {
2001 mem_pool_init(&istate->ce_mem_pool,
2002 estimate_cache_size(mmap_size, istate->cache_nr));
2003 }
2004
2005 consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
2006 0, istate->cache_nr, mmap, src_offset, NULL);
2007 return consumed;
2008 }
2009
2010 /*
2011 * Mostly randomly chosen maximum thread counts: we
2012 * cap the parallelism to online_cpus() threads, and we want
2013 * to have at least 10000 cache entries per thread for it to
2014 * be worth starting a thread.
2015 */
2016
2017 #define THREAD_COST (10000)
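/*
 * Worked example of the heuristic, with hypothetical numbers: when the
 * thread count is left for the code below to pick, a 250,000-entry
 * index on an 8-core machine asks for 250000 / THREAD_COST = 25
 * threads and is capped at online_cpus() = 8, while a 15,000-entry
 * index gets a single thread and stays effectively unthreaded.
 */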
2018
2019 struct load_cache_entries_thread_data
2020 {
2021 pthread_t pthread;
2022 struct index_state *istate;
2023 struct mem_pool *ce_mem_pool;
2024 int offset;
2025 const char *mmap;
2026 struct index_entry_offset_table *ieot;
2027 int ieot_start; /* starting index into the ieot array */
2028 int ieot_blocks; /* count of ieot entries to process */
2029 unsigned long consumed; /* return # of bytes in index file processed */
2030 };
2031
2032 /*
2033 * A thread proc to run the load_cache_entries() computation
2034 * across multiple background threads.
2035 */
2036 static void *load_cache_entries_thread(void *_data)
2037 {
2038 struct load_cache_entries_thread_data *p = _data;
2039 int i;
2040
2041 /* iterate across all ieot blocks assigned to this thread */
2042 for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
2043 p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
2044 p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
2045 p->offset += p->ieot->entries[i].nr;
2046 }
2047 return NULL;
2048 }
2049
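/*
 * Sketch of how load_cache_entries_threaded() below divides the IEOT
 * blocks among threads, assuming a hypothetical table of 10 blocks
 * read by 3 threads: ieot_blocks = DIV_ROUND_UP(10, 3) = 4, so the
 * threads are handed blocks [0,4), [4,8) and [8,10) respectively, the
 * last assignment being trimmed by the
 * "ieot_start + ieot_blocks > ieot->nr" check.
 */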
2050 static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
2051 int nr_threads, struct index_entry_offset_table *ieot)
2052 {
2053 int i, offset, ieot_blocks, ieot_start, err;
2054 struct load_cache_entries_thread_data *data;
2055 unsigned long consumed = 0;
2056
2057 /* a little sanity checking */
2058 if (istate->name_hash_initialized)
2059 BUG("the name hash isn't thread safe");
2060
2061 mem_pool_init(&istate->ce_mem_pool, 0);
2062
2063 /* ensure we have no more threads than we have blocks to process */
2064 if (nr_threads > ieot->nr)
2065 nr_threads = ieot->nr;
2066 data = xcalloc(nr_threads, sizeof(*data));
2067
2068 offset = ieot_start = 0;
2069 ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
2070 for (i = 0; i < nr_threads; i++) {
2071 struct load_cache_entries_thread_data *p = &data[i];
2072 int nr, j;
2073
2074 if (ieot_start + ieot_blocks > ieot->nr)
2075 ieot_blocks = ieot->nr - ieot_start;
2076
2077 p->istate = istate;
2078 p->offset = offset;
2079 p->mmap = mmap;
2080 p->ieot = ieot;
2081 p->ieot_start = ieot_start;
2082 p->ieot_blocks = ieot_blocks;
2083
2084 /* create a mem_pool for each thread */
2085 nr = 0;
2086 for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
2087 nr += p->ieot->entries[j].nr;
2088 if (istate->version == 4) {
2089 mem_pool_init(&p->ce_mem_pool,
2090 estimate_cache_size_from_compressed(nr));
2091 } else {
2092 mem_pool_init(&p->ce_mem_pool,
2093 estimate_cache_size(mmap_size, nr));
2094 }
2095
2096 err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
2097 if (err)
2098 die(_("unable to create load_cache_entries thread: %s"), strerror(err));
2099
2100 /* increment by the number of cache entries in the ieot block being processed */
2101 for (j = 0; j < ieot_blocks; j++)
2102 offset += ieot->entries[ieot_start + j].nr;
2103 ieot_start += ieot_blocks;
2104 }
2105
2106 for (i = 0; i < nr_threads; i++) {
2107 struct load_cache_entries_thread_data *p = &data[i];
2108
2109 err = pthread_join(p->pthread, NULL);
2110 if (err)
2111 die(_("unable to join load_cache_entries thread: %s"), strerror(err));
2112 mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
2113 consumed += p->consumed;
2114 }
2115
2116 free(data);
2117
2118 return consumed;
2119 }
2120
2121 /* remember to discard_cache() before reading a different cache! */
2122 int do_read_index(struct index_state *istate, const char *path, int must_exist)
2123 {
2124 int fd;
2125 struct stat st;
2126 unsigned long src_offset;
2127 const struct cache_header *hdr;
2128 const char *mmap;
2129 size_t mmap_size;
2130 struct load_index_extensions p;
2131 size_t extension_offset = 0;
2132 int nr_threads, cpus;
2133 struct index_entry_offset_table *ieot = NULL;
2134
2135 if (istate->initialized)
2136 return istate->cache_nr;
2137
2138 istate->timestamp.sec = 0;
2139 istate->timestamp.nsec = 0;
2140 fd = open(path, O_RDONLY);
2141 if (fd < 0) {
2142 if (!must_exist && errno == ENOENT)
2143 return 0;
2144 die_errno(_("%s: index file open failed"), path);
2145 }
2146
2147 if (fstat(fd, &st))
2148 die_errno(_("%s: cannot stat the open index"), path);
2149
2150 mmap_size = xsize_t(st.st_size);
2151 if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
2152 die(_("%s: index file smaller than expected"), path);
2153
2154 mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
2155 if (mmap == MAP_FAILED)
2156 die_errno(_("%s: unable to map index file"), path);
2157 close(fd);
2158
2159 hdr = (const struct cache_header *)mmap;
2160 if (verify_hdr(hdr, mmap_size) < 0)
2161 goto unmap;
2162
2163 hashcpy(istate->oid.hash, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
2164 istate->version = ntohl(hdr->hdr_version);
2165 istate->cache_nr = ntohl(hdr->hdr_entries);
2166 istate->cache_alloc = alloc_nr(istate->cache_nr);
2167 istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
2168 istate->initialized = 1;
2169
2170 p.istate = istate;
2171 p.mmap = mmap;
2172 p.mmap_size = mmap_size;
2173
2174 src_offset = sizeof(*hdr);
2175
2176 if (git_config_get_index_threads(&nr_threads))
2177 nr_threads = 1;
2178
2179 /* TODO: does creating more threads than cores help? */
2180 if (!nr_threads) {
2181 nr_threads = istate->cache_nr / THREAD_COST;
2182 cpus = online_cpus();
2183 if (nr_threads > cpus)
2184 nr_threads = cpus;
2185 }
2186
2187 if (!HAVE_THREADS)
2188 nr_threads = 1;
2189
2190 if (nr_threads > 1) {
2191 extension_offset = read_eoie_extension(mmap, mmap_size);
2192 if (extension_offset) {
2193 int err;
2194
2195 p.src_offset = extension_offset;
2196 err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
2197 if (err)
2198 die(_("unable to create load_index_extensions thread: %s"), strerror(err));
2199
2200 nr_threads--;
2201 }
2202 }
2203
2204 /*
2205 * Locate and read the index entry offset table so that we can use it
2206 * to multi-thread the reading of the cache entries.
2207 */
2208 if (extension_offset && nr_threads > 1)
2209 ieot = read_ieot_extension(mmap, mmap_size, extension_offset);
2210
2211 if (ieot) {
2212 src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
2213 free(ieot);
2214 } else {
2215 src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
2216 }
2217
2218 istate->timestamp.sec = st.st_mtime;
2219 istate->timestamp.nsec = ST_MTIME_NSEC(st);
2220
2221 /* if we created a thread, join it; otherwise load the extensions on the primary thread */
2222 if (extension_offset) {
2223 int ret = pthread_join(p.pthread, NULL);
2224 if (ret)
2225 die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
2226 } else {
2227 p.src_offset = src_offset;
2228 load_index_extensions(&p);
2229 }
2230 munmap((void *)mmap, mmap_size);
2231
2232 /*
2233 * TODO trace2: replace "the_repository" with the actual repo instance
2234 * that is associated with the given "istate".
2235 */
2236 trace2_data_intmax("index", the_repository, "read/version",
2237 istate->version);
2238 trace2_data_intmax("index", the_repository, "read/cache_nr",
2239 istate->cache_nr);
2240
2241 return istate->cache_nr;
2242
2243 unmap:
2244 munmap((void *)mmap, mmap_size);
2245 die(_("index file corrupt"));
2246 }
2247
2248 /*
2249 * Signal that the shared index is used by updating its mtime.
2250 *
2251 * This way, shared index files can be removed if they have not been
2252 * used for some time.
2253 */
2254 static void freshen_shared_index(const char *shared_index, int warn)
2255 {
2256 if (!check_and_freshen_file(shared_index, 1) && warn)
2257 warning(_("could not freshen shared index '%s'"), shared_index);
2258 }
2259
2260 int read_index_from(struct index_state *istate, const char *path,
2261 const char *gitdir)
2262 {
2263 struct split_index *split_index;
2264 int ret;
2265 char *base_oid_hex;
2266 char *base_path;
2267
2268 /* istate->initialized covers both .git/index and .git/sharedindex.xxx */
2269 if (istate->initialized)
2270 return istate->cache_nr;
2271
2272 /*
2273 * TODO trace2: replace "the_repository" with the actual repo instance
2274 * that is associated with the given "istate".
2275 */
2276 trace2_region_enter_printf("index", "do_read_index", the_repository,
2277 "%s", path);
2278 trace_performance_enter();
2279 ret = do_read_index(istate, path, 0);
2280 trace_performance_leave("read cache %s", path);
2281 trace2_region_leave_printf("index", "do_read_index", the_repository,
2282 "%s", path);
2283
2284 split_index = istate->split_index;
2285 if (!split_index || is_null_oid(&split_index->base_oid)) {
2286 post_read_index_from(istate);
2287 return ret;
2288 }
2289
2290 trace_performance_enter();
2291 if (split_index->base)
2292 discard_index(split_index->base);
2293 else
2294 split_index->base = xcalloc(1, sizeof(*split_index->base));
2295
2296 base_oid_hex = oid_to_hex(&split_index->base_oid);
2297 base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
2298 trace2_region_enter_printf("index", "shared/do_read_index",
2299 the_repository, "%s", base_path);
2300 ret = do_read_index(split_index->base, base_path, 1);
2301 trace2_region_leave_printf("index", "shared/do_read_index",
2302 the_repository, "%s", base_path);
2303 if (!oideq(&split_index->base_oid, &split_index->base->oid))
2304 die(_("broken index, expect %s in %s, got %s"),
2305 base_oid_hex, base_path,
2306 oid_to_hex(&split_index->base->oid));
2307
2308 freshen_shared_index(base_path, 0);
2309 merge_base_index(istate);
2310 post_read_index_from(istate);
2311 trace_performance_leave("read cache %s", base_path);
2312 free(base_path);
2313 return ret;
2314 }
2315
2316 int is_index_unborn(struct index_state *istate)
2317 {
2318 return (!istate->cache_nr && !istate->timestamp.sec);
2319 }
2320
2321 int discard_index(struct index_state *istate)
2322 {
2323 /*
2324 * Cache entries in istate->cache[] should have been allocated
2325 * from the memory pool associated with this index, or from an
2326 * associated split_index. There is no need to free individual
2327 * cache entries. validate_cache_entries can detect when this
2328 * assertion does not hold.
2329 */
2330 validate_cache_entries(istate);
2331
2332 resolve_undo_clear_index(istate);
2333 istate->cache_nr = 0;
2334 istate->cache_changed = 0;
2335 istate->timestamp.sec = 0;
2336 istate->timestamp.nsec = 0;
2337 free_name_hash(istate);
2338 cache_tree_free(&(istate->cache_tree));
2339 istate->initialized = 0;
2340 istate->fsmonitor_has_run_once = 0;
2341 FREE_AND_NULL(istate->cache);
2342 istate->cache_alloc = 0;
2343 discard_split_index(istate);
2344 free_untracked_cache(istate->untracked);
2345 istate->untracked = NULL;
2346
2347 if (istate->ce_mem_pool) {
2348 mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
2349 istate->ce_mem_pool = NULL;
2350 }
2351
2352 return 0;
2353 }
2354
2355 /*
2356 * Validate the cache entries of this index.
2357 * All cache entries associated with this index
2358 * should have been allocated by the memory pool
2359 * associated with this index, or by a referenced
2360 * split index.
2361 */
2362 void validate_cache_entries(const struct index_state *istate)
2363 {
2364 int i;
2365
2366 if (!should_validate_cache_entries() || !istate || !istate->initialized)
2367 return;
2368
2369 for (i = 0; i < istate->cache_nr; i++) {
2370 if (!istate) {
2371 BUG("cache entry is not allocated from expected memory pool");
2372 } else if (!istate->ce_mem_pool ||
2373 !mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
2374 if (!istate->split_index ||
2375 !istate->split_index->base ||
2376 !istate->split_index->base->ce_mem_pool ||
2377 !mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
2378 BUG("cache entry is not allocated from expected memory pool");
2379 }
2380 }
2381 }
2382
2383 if (istate->split_index)
2384 validate_cache_entries(istate->split_index->base);
2385 }
2386
2387 int unmerged_index(const struct index_state *istate)
2388 {
2389 int i;
2390 for (i = 0; i < istate->cache_nr; i++) {
2391 if (ce_stage(istate->cache[i]))
2392 return 1;
2393 }
2394 return 0;
2395 }
2396
2397 int repo_index_has_changes(struct repository *repo,
2398 struct tree *tree,
2399 struct strbuf *sb)
2400 {
2401 struct index_state *istate = repo->index;
2402 struct object_id cmp;
2403 int i;
2404
2405 if (tree)
2406 cmp = tree->object.oid;
2407 if (tree || !get_oid_tree("HEAD", &cmp)) {
2408 struct diff_options opt;
2409
2410 repo_diff_setup(repo, &opt);
2411 opt.flags.exit_with_status = 1;
2412 if (!sb)
2413 opt.flags.quick = 1;
2414 do_diff_cache(&cmp, &opt);
2415 diffcore_std(&opt);
2416 for (i = 0; sb && i < diff_queued_diff.nr; i++) {
2417 if (i)
2418 strbuf_addch(sb, ' ');
2419 strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
2420 }
2421 diff_flush(&opt);
2422 return opt.flags.has_changes != 0;
2423 } else {
2424 for (i = 0; sb && i < istate->cache_nr; i++) {
2425 if (i)
2426 strbuf_addch(sb, ' ');
2427 strbuf_addstr(sb, istate->cache[i]->name);
2428 }
2429 return !!istate->cache_nr;
2430 }
2431 }
2432
2433 #define WRITE_BUFFER_SIZE 8192
2434 static unsigned char write_buffer[WRITE_BUFFER_SIZE];
2435 static unsigned long write_buffer_len;
2436
2437 static int ce_write_flush(git_hash_ctx *context, int fd)
2438 {
2439 unsigned int buffered = write_buffer_len;
2440 if (buffered) {
2441 the_hash_algo->update_fn(context, write_buffer, buffered);
2442 if (write_in_full(fd, write_buffer, buffered) < 0)
2443 return -1;
2444 write_buffer_len = 0;
2445 }
2446 return 0;
2447 }
2448
2449 static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
2450 {
2451 while (len) {
2452 unsigned int buffered = write_buffer_len;
2453 unsigned int partial = WRITE_BUFFER_SIZE - buffered;
2454 if (partial > len)
2455 partial = len;
2456 memcpy(write_buffer + buffered, data, partial);
2457 buffered += partial;
2458 if (buffered == WRITE_BUFFER_SIZE) {
2459 write_buffer_len = buffered;
2460 if (ce_write_flush(context, fd))
2461 return -1;
2462 buffered = 0;
2463 }
2464 write_buffer_len = buffered;
2465 len -= partial;
2466 data = (char *) data + partial;
2467 }
2468 return 0;
2469 }
2470
2471 static int write_index_ext_header(git_hash_ctx *context, git_hash_ctx *eoie_context,
2472 int fd, unsigned int ext, unsigned int sz)
2473 {
2474 ext = htonl(ext);
2475 sz = htonl(sz);
2476 if (eoie_context) {
2477 the_hash_algo->update_fn(eoie_context, &ext, 4);
2478 the_hash_algo->update_fn(eoie_context, &sz, 4);
2479 }
2480 return ((ce_write(context, fd, &ext, 4) < 0) ||
2481 (ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
2482 }
2483
2484 static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
2485 {
2486 unsigned int left = write_buffer_len;
2487
2488 if (left) {
2489 write_buffer_len = 0;
2490 the_hash_algo->update_fn(context, write_buffer, left);
2491 }
2492
2493 /* Flush first if not enough space for hash signature */
2494 if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
2495 if (write_in_full(fd, write_buffer, left) < 0)
2496 return -1;
2497 left = 0;
2498 }
2499
2500 /* Append the hash signature at the end */
2501 the_hash_algo->final_fn(write_buffer + left, context);
2502 hashcpy(hash, write_buffer + left);
2503 left += the_hash_algo->rawsz;
2504 return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
2505 }
2506
2507 static void ce_smudge_racily_clean_entry(struct index_state *istate,
2508 struct cache_entry *ce)
2509 {
2510 /*
2511 * The only thing we care about in this function is to smudge the
2512 * falsely clean entry due to the touch-update-touch race, so we leave
2513 * everything else as it is. We are called for entries whose
2514 * ce_stat_data.sd_mtime matches the index file mtime.
2515 *
2516 * Note that this actually does not do much for gitlinks, for
2517 * which ce_match_stat_basic() always goes to the actual
2518 * contents. The caller checks with is_racy_timestamp() which
2519 * always says "no" for gitlinks, so we are not called for them ;-)
2520 */
2521 struct stat st;
2522
2523 if (lstat(ce->name, &st) < 0)
2524 return;
2525 if (ce_match_stat_basic(ce, &st))
2526 return;
2527 if (ce_modified_check_fs(istate, ce, &st)) {
2528 /* This is "racily clean"; smudge it. Note that this
2529 * is tricky code. At first glance, it may appear
2530 * that it can break with this sequence:
2531 *
2532 * $ echo xyzzy >frotz
2533 * $ git-update-index --add frotz
2534 * $ : >frotz
2535 * $ sleep 3
2536 * $ echo filfre >nitfol
2537 * $ git-update-index --add nitfol
2538 *
2539 * but it does not. When the second update-index runs,
2540 * it notices that the entry "frotz" has the same timestamp
2541 * as the index, and if we were to smudge it by resetting its
2542 * size to zero here, then the object name recorded
2543 * in the index is for the 6-byte file but the cached stat information
2544 * becomes zero --- which would then match what we would
2545 * obtain from the filesystem next time we stat("frotz").
2546 *
2547 * However, the second update-index, before calling
2548 * this function, notices that the cached size is 6
2549 * bytes and what is on the filesystem is an empty
2550 * file, and never calls us, so the cached size information
2551 * for "frotz" stays 6 which does not match the filesystem.
2552 */
2553 ce->ce_stat_data.sd_size = 0;
2554 }
2555 }
2556
2557 /* Copy miscellaneous fields but not the name */
2558 static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
2559 struct cache_entry *ce)
2560 {
2561 short flags;
2562 const unsigned hashsz = the_hash_algo->rawsz;
2563 uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);
2564
2565 ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
2566 ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
2567 ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
2568 ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
2569 ondisk->dev = htonl(ce->ce_stat_data.sd_dev);
2570 ondisk->ino = htonl(ce->ce_stat_data.sd_ino);
2571 ondisk->mode = htonl(ce->ce_mode);
2572 ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
2573 ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
2574 ondisk->size = htonl(ce->ce_stat_data.sd_size);
2575 hashcpy(ondisk->data, ce->oid.hash);
2576
2577 flags = ce->ce_flags & ~CE_NAMEMASK;
2578 flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
2579 flagsp[0] = htons(flags);
2580 if (ce->ce_flags & CE_EXTENDED) {
2581 flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
2582 }
2583 }
2584
2585 static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
2586 struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
2587 {
2588 int size;
2589 int result;
2590 unsigned int saved_namelen;
2591 int stripped_name = 0;
2592 static unsigned char padding[8] = { 0x00 };
2593
2594 if (ce->ce_flags & CE_STRIP_NAME) {
2595 saved_namelen = ce_namelen(ce);
2596 ce->ce_namelen = 0;
2597 stripped_name = 1;
2598 }
2599
2600 size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);
2601
2602 if (!previous_name) {
2603 int len = ce_namelen(ce);
2604 copy_cache_entry_to_ondisk(ondisk, ce);
2605 result = ce_write(c, fd, ondisk, size);
2606 if (!result)
2607 result = ce_write(c, fd, ce->name, len);
2608 if (!result)
2609 result = ce_write(c, fd, padding, align_padding_size(size, len));
2610 } else {
2611 int common, to_remove, prefix_size;
2612 unsigned char to_remove_vi[16];
2613 for (common = 0;
2614 (ce->name[common] &&
2615 common < previous_name->len &&
2616 ce->name[common] == previous_name->buf[common]);
2617 common++)
2618 ; /* still matching */
2619 to_remove = previous_name->len - common;
2620 prefix_size = encode_varint(to_remove, to_remove_vi);
2621
2622 copy_cache_entry_to_ondisk(ondisk, ce);
2623 result = ce_write(c, fd, ondisk, size);
2624 if (!result)
2625 result = ce_write(c, fd, to_remove_vi, prefix_size);
2626 if (!result)
2627 result = ce_write(c, fd, ce->name + common, ce_namelen(ce) - common);
2628 if (!result)
2629 result = ce_write(c, fd, padding, 1);
2630
2631 strbuf_splice(previous_name, common, to_remove,
2632 ce->name + common, ce_namelen(ce) - common);
2633 }
2634 if (stripped_name) {
2635 ce->ce_namelen = saved_namelen;
2636 ce->ce_flags &= ~CE_STRIP_NAME;
2637 }
2638
2639 return result;
2640 }
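/*
 * Illustrative example of the V4 encoding performed above, assuming the
 * previously written name was "t/t0001-init.sh" (length 15) and the
 * current entry is "t/t0002-gitfile.sh": the common prefix "t/t000" is
 * 6 bytes, so to_remove = 15 - 6 = 9 is emitted as a varint, followed
 * by the suffix "2-gitfile.sh" and a terminating NUL byte.
 */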
2641
2642 /*
2643 * This function verifies that index_state still matches the index file on
2644 * disk by comparing trailing checksums. On any failure, do not die; just return 0.
2645 */
2646 static int verify_index_from(const struct index_state *istate, const char *path)
2647 {
2648 int fd;
2649 ssize_t n;
2650 struct stat st;
2651 unsigned char hash[GIT_MAX_RAWSZ];
2652
2653 if (!istate->initialized)
2654 return 0;
2655
2656 fd = open(path, O_RDONLY);
2657 if (fd < 0)
2658 return 0;
2659
2660 if (fstat(fd, &st))
2661 goto out;
2662
2663 if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
2664 goto out;
2665
2666 n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
2667 if (n != the_hash_algo->rawsz)
2668 goto out;
2669
2670 if (!hasheq(istate->oid.hash, hash))
2671 goto out;
2672
2673 close(fd);
2674 return 1;
2675
2676 out:
2677 close(fd);
2678 return 0;
2679 }
2680
2681 static int repo_verify_index(struct repository *repo)
2682 {
2683 return verify_index_from(repo->index, repo->index_file);
2684 }
2685
2686 static int has_racy_timestamp(struct index_state *istate)
2687 {
2688 int entries = istate->cache_nr;
2689 int i;
2690
2691 for (i = 0; i < entries; i++) {
2692 struct cache_entry *ce = istate->cache[i];
2693 if (is_racy_timestamp(istate, ce))
2694 return 1;
2695 }
2696 return 0;
2697 }
2698
2699 void repo_update_index_if_able(struct repository *repo,
2700 struct lock_file *lockfile)
2701 {
2702 if ((repo->index->cache_changed ||
2703 has_racy_timestamp(repo->index)) &&
2704 repo_verify_index(repo))
2705 write_locked_index(repo->index, lockfile, COMMIT_LOCK);
2706 else
2707 rollback_lock_file(lockfile);
2708 }
2709
2710 static int record_eoie(void)
2711 {
2712 int val;
2713
2714 if (!git_config_get_bool("index.recordendofindexentries", &val))
2715 return val;
2716
2717 /*
2718 * As a convenience, the end of index entries extension
2719 * used for threading is written by default if the user
2720 * explicitly requested threaded index reads.
2721 */
2722 return !git_config_get_index_threads(&val) && val != 1;
2723 }
2724
2725 static int record_ieot(void)
2726 {
2727 int val;
2728
2729 if (!git_config_get_bool("index.recordoffsettable", &val))
2730 return val;
2731
2732 /*
2733 * As a convenience, the offset table used for threading is
2734 * written by default if the user explicitly requested
2735 * threaded index reads.
2736 */
2737 return !git_config_get_index_threads(&val) && val != 1;
2738 }
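/*
 * For example, either extension can be forced on or off independently of
 * the threading heuristics with the config keys queried above, e.g.:
 *
 *   git config index.recordEndOfIndexEntries true
 *   git config index.recordOffsetTable true
 *
 * (spelled in camel case here only for readability; config keys are
 * case-insensitive).
 */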
2739
2740 /*
2741 * On success, `tempfile` is closed. If it is the temporary file
2742 * of a `struct lock_file`, we will therefore effectively perform
2743 * a `close_lock_file_gently()`. Since that is an implementation
2744 * detail of lockfiles, callers of `do_write_index()` should not
2745 * rely on it.
2746 */
2747 static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
2748 int strip_extensions)
2749 {
2750 uint64_t start = getnanotime();
2751 int newfd = tempfile->fd;
2752 git_hash_ctx c, eoie_c;
2753 struct cache_header hdr;
2754 int i, err = 0, removed, extended, hdr_version;
2755 struct cache_entry **cache = istate->cache;
2756 int entries = istate->cache_nr;
2757 struct stat st;
2758 struct ondisk_cache_entry ondisk;
2759 struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
2760 int drop_cache_tree = istate->drop_cache_tree;
2761 off_t offset;
2762 int ieot_entries = 1;
2763 struct index_entry_offset_table *ieot = NULL;
2764 int nr, nr_threads;
2765
2766 for (i = removed = extended = 0; i < entries; i++) {
2767 if (cache[i]->ce_flags & CE_REMOVE)
2768 removed++;
2769
2770 /* reduce extended entries if possible */
2771 cache[i]->ce_flags &= ~CE_EXTENDED;
2772 if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
2773 extended++;
2774 cache[i]->ce_flags |= CE_EXTENDED;
2775 }
2776 }
2777
2778 if (!istate->version) {
2779 istate->version = get_index_format_default();
2780 if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
2781 init_split_index(istate);
2782 }
2783
2784 /* demote version 3 to version 2 when the latter suffices */
2785 if (istate->version == 3 || istate->version == 2)
2786 istate->version = extended ? 3 : 2;
2787
2788 hdr_version = istate->version;
2789
2790 hdr.hdr_signature = htonl(CACHE_SIGNATURE);
2791 hdr.hdr_version = htonl(hdr_version);
2792 hdr.hdr_entries = htonl(entries - removed);
2793
2794 the_hash_algo->init_fn(&c);
2795 if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
2796 return -1;
2797
2798 if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
2799 nr_threads = 1;
2800
2801 if (nr_threads != 1 && record_ieot()) {
2802 int ieot_blocks, cpus;
2803
2804 /*
2805 * Ensure the default number of ieot blocks maps evenly to the
2806 * default number of threads that will process them, leaving
2807 * room for the thread that loads the index extensions.
2808 */
2809 if (!nr_threads) {
2810 ieot_blocks = istate->cache_nr / THREAD_COST;
2811 cpus = online_cpus();
2812 if (ieot_blocks > cpus - 1)
2813 ieot_blocks = cpus - 1;
2814 } else {
2815 ieot_blocks = nr_threads;
2816 if (ieot_blocks > istate->cache_nr)
2817 ieot_blocks = istate->cache_nr;
2818 }
2819
2820 /*
2821 * no reason to write out the IEOT extension if we don't
2822 * have enough blocks to utilize multi-threading
2823 */
2824 if (ieot_blocks > 1) {
2825 ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
2826 + (ieot_blocks * sizeof(struct index_entry_offset)));
2827 ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
2828 }
2829 }
2830
2831 offset = lseek(newfd, 0, SEEK_CUR);
2832 if (offset < 0) {
2833 free(ieot);
2834 return -1;
2835 }
2836 offset += write_buffer_len;
2837 nr = 0;
2838 previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;
2839
2840 for (i = 0; i < entries; i++) {
2841 struct cache_entry *ce = cache[i];
2842 if (ce->ce_flags & CE_REMOVE)
2843 continue;
2844 if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
2845 ce_smudge_racily_clean_entry(istate, ce);
2846 if (is_null_oid(&ce->oid)) {
2847 static const char msg[] = "cache entry has null sha1: %s";
2848 static int allow = -1;
2849
2850 if (allow < 0)
2851 allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
2852 if (allow)
2853 warning(msg, ce->name);
2854 else
2855 err = error(msg, ce->name);
2856
2857 drop_cache_tree = 1;
2858 }
2859 if (ieot && i && (i % ieot_entries == 0)) {
2860 ieot->entries[ieot->nr].nr = nr;
2861 ieot->entries[ieot->nr].offset = offset;
2862 ieot->nr++;
2863 /*
2864 * If we have a V4 index, set the first byte to an invalid
2865 * character to ensure there is nothing common with the previous
2866 * entry.
2867 */
2868 if (previous_name)
2869 previous_name->buf[0] = 0;
2870 nr = 0;
2871 offset = lseek(newfd, 0, SEEK_CUR);
2872 if (offset < 0) {
2873 free(ieot);
2874 return -1;
2875 }
2876 offset += write_buffer_len;
2877 }
2878 if (ce_write_entry(&c, newfd, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
2879 err = -1;
2880
2881 if (err)
2882 break;
2883 nr++;
2884 }
2885 if (ieot && nr) {
2886 ieot->entries[ieot->nr].nr = nr;
2887 ieot->entries[ieot->nr].offset = offset;
2888 ieot->nr++;
2889 }
2890 strbuf_release(&previous_name_buf);
2891
2892 if (err) {
2893 free(ieot);
2894 return err;
2895 }
2896
2897 /* Write extension data here */
2898 offset = lseek(newfd, 0, SEEK_CUR);
2899 if (offset < 0) {
2900 free(ieot);
2901 return -1;
2902 }
2903 offset += write_buffer_len;
2904 the_hash_algo->init_fn(&eoie_c);
2905
2906 /*
2907 * Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
2908 * can minimize the number of extensions we have to scan through to
2909 * find it during load. Write it out regardless of the
2910 * strip_extensions parameter as we need it when loading the shared
2911 * index.
2912 */
2913 if (ieot) {
2914 struct strbuf sb = STRBUF_INIT;
2915
2916 write_ieot_extension(&sb, ieot);
2917 err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0
2918 || ce_write(&c, newfd, sb.buf, sb.len) < 0;
2919 strbuf_release(&sb);
2920 free(ieot);
2921 if (err)
2922 return -1;
2923 }
2924
2925 if (!strip_extensions && istate->split_index &&
2926 !is_null_oid(&istate->split_index->base_oid)) {
2927 struct strbuf sb = STRBUF_INIT;
2928
2929 err = write_link_extension(&sb, istate) < 0 ||
2930 write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_LINK,
2931 sb.len) < 0 ||
2932 ce_write(&c, newfd, sb.buf, sb.len) < 0;
2933 strbuf_release(&sb);
2934 if (err)
2935 return -1;
2936 }
2937 if (!strip_extensions && !drop_cache_tree && istate->cache_tree) {
2938 struct strbuf sb = STRBUF_INIT;
2939
2940 cache_tree_write(&sb, istate->cache_tree);
2941 err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_TREE, sb.len) < 0
2942 || ce_write(&c, newfd, sb.buf, sb.len) < 0;
2943 strbuf_release(&sb);
2944 if (err)
2945 return -1;
2946 }
2947 if (!strip_extensions && istate->resolve_undo) {
2948 struct strbuf sb = STRBUF_INIT;
2949
2950 resolve_undo_write(&sb, istate->resolve_undo);
2951 err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_RESOLVE_UNDO,
2952 sb.len) < 0
2953 || ce_write(&c, newfd, sb.buf, sb.len) < 0;
2954 strbuf_release(&sb);
2955 if (err)
2956 return -1;
2957 }
2958 if (!strip_extensions && istate->untracked) {
2959 struct strbuf sb = STRBUF_INIT;
2960
2961 write_untracked_extension(&sb, istate->untracked);
2962 err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_UNTRACKED,
2963 sb.len) < 0 ||
2964 ce_write(&c, newfd, sb.buf, sb.len) < 0;
2965 strbuf_release(&sb);
2966 if (err)
2967 return -1;
2968 }
2969 if (!strip_extensions && istate->fsmonitor_last_update) {
2970 struct strbuf sb = STRBUF_INIT;
2971
2972 write_fsmonitor_extension(&sb, istate);
2973 err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0
2974 || ce_write(&c, newfd, sb.buf, sb.len) < 0;
2975 strbuf_release(&sb);
2976 if (err)
2977 return -1;
2978 }
2979
2980 /*
2981 * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
2982 * so that it can be found and processed before all the index entries are
2983 * read. Write it out regardless of the strip_extensions parameter as we need it
2984 * when loading the shared index.
2985 */
2986 if (offset && record_eoie()) {
2987 struct strbuf sb = STRBUF_INIT;
2988
2989 write_eoie_extension(&sb, &eoie_c, offset);
2990 err = write_index_ext_header(&c, NULL, newfd, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0
2991 || ce_write(&c, newfd, sb.buf, sb.len) < 0;
2992 strbuf_release(&sb);
2993 if (err)
2994 return -1;
2995 }
2996
2997 if (ce_flush(&c, newfd, istate->oid.hash))
2998 return -1;
2999 if (close_tempfile_gently(tempfile)) {
3000 error(_("could not close '%s'"), tempfile->filename.buf);
3001 return -1;
3002 }
3003 if (stat(tempfile->filename.buf, &st))
3004 return -1;
3005 istate->timestamp.sec = (unsigned int)st.st_mtime;
3006 istate->timestamp.nsec = ST_MTIME_NSEC(st);
3007 trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
3008
3009 /*
3010 * TODO trace2: replace "the_repository" with the actual repo instance
3011 * that is associated with the given "istate".
3012 */
3013 trace2_data_intmax("index", the_repository, "write/version",
3014 istate->version);
3015 trace2_data_intmax("index", the_repository, "write/cache_nr",
3016 istate->cache_nr);
3017
3018 return 0;
3019 }
3020
3021 void set_alternate_index_output(const char *name)
3022 {
3023 alternate_index_output = name;
3024 }
3025
3026 static int commit_locked_index(struct lock_file *lk)
3027 {
3028 if (alternate_index_output)
3029 return commit_lock_file_to(lk, alternate_index_output);
3030 else
3031 return commit_lock_file(lk);
3032 }
3033
3034 static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
3035 unsigned flags)
3036 {
3037 int ret;
3038
3039 /*
3040 * TODO trace2: replace "the_repository" with the actual repo instance
3041 * that is associated with the given "istate".
3042 */
3043 trace2_region_enter_printf("index", "do_write_index", the_repository,
3044 "%s", lock->tempfile->filename.buf);
3045 ret = do_write_index(istate, lock->tempfile, 0);
3046 trace2_region_leave_printf("index", "do_write_index", the_repository,
3047 "%s", lock->tempfile->filename.buf);
3048
3049 if (ret)
3050 return ret;
3051 if (flags & COMMIT_LOCK)
3052 ret = commit_locked_index(lock);
3053 else
3054 ret = close_lock_file_gently(lock);
3055
3056 run_hook_le(NULL, "post-index-change",
3057 istate->updated_workdir ? "1" : "0",
3058 istate->updated_skipworktree ? "1" : "0", NULL);
3059 istate->updated_workdir = 0;
3060 istate->updated_skipworktree = 0;
3061
3062 return ret;
3063 }
3064
3065 static int write_split_index(struct index_state *istate,
3066 struct lock_file *lock,
3067 unsigned flags)
3068 {
3069 int ret;
3070 prepare_to_write_split_index(istate);
3071 ret = do_write_locked_index(istate, lock, flags);
3072 finish_writing_split_index(istate);
3073 return ret;
3074 }
3075
3076 static const char *shared_index_expire = "2.weeks.ago";
3077
3078 static unsigned long get_shared_index_expire_date(void)
3079 {
3080 static unsigned long shared_index_expire_date;
3081 static int shared_index_expire_date_prepared;
3082
3083 if (!shared_index_expire_date_prepared) {
3084 git_config_get_expiry("splitindex.sharedindexexpire",
3085 &shared_index_expire);
3086 shared_index_expire_date = approxidate(shared_index_expire);
3087 shared_index_expire_date_prepared = 1;
3088 }
3089
3090 return shared_index_expire_date;
3091 }
3092
3093 static int should_delete_shared_index(const char *shared_index_path)
3094 {
3095 struct stat st;
3096 unsigned long expiration;
3097
3098 /* Check timestamp */
3099 expiration = get_shared_index_expire_date();
3100 if (!expiration)
3101 return 0;
3102 if (stat(shared_index_path, &st))
3103 return error_errno(_("could not stat '%s'"), shared_index_path);
3104 if (st.st_mtime > expiration)
3105 return 0;
3106
3107 return 1;
3108 }
3109
3110 static int clean_shared_index_files(const char *current_hex)
3111 {
3112 struct dirent *de;
3113 DIR *dir = opendir(get_git_dir());
3114
3115 if (!dir)
3116 return error_errno(_("unable to open git dir: %s"), get_git_dir());
3117
3118 while ((de = readdir(dir)) != NULL) {
3119 const char *sha1_hex;
3120 const char *shared_index_path;
3121 if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))
3122 continue;
3123 if (!strcmp(sha1_hex, current_hex))
3124 continue;
3125 shared_index_path = git_path("%s", de->d_name);
3126 if (should_delete_shared_index(shared_index_path) > 0 &&
3127 unlink(shared_index_path))
3128 warning_errno(_("unable to unlink: %s"), shared_index_path);
3129 }
3130 closedir(dir);
3131
3132 return 0;
3133 }
3134
3135 static int write_shared_index(struct index_state *istate,
3136 struct tempfile **temp)
3137 {
3138 struct split_index *si = istate->split_index;
3139 int ret;
3140
3141 move_cache_to_base_index(istate);
3142
3143 trace2_region_enter_printf("index", "shared/do_write_index",
3144 the_repository, "%s", (*temp)->filename.buf);
3145 ret = do_write_index(si->base, *temp, 1);
3146 trace2_region_leave_printf("index", "shared/do_write_index",
3147 the_repository, "%s", (*temp)->filename.buf);
3148
3149 if (ret)
3150 return ret;
3151 ret = adjust_shared_perm(get_tempfile_path(*temp));
3152 if (ret) {
3153 error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
3154 return ret;
3155 }
3156 ret = rename_tempfile(temp,
3157 git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
3158 if (!ret) {
3159 oidcpy(&si->base_oid, &si->base->oid);
3160 clean_shared_index_files(oid_to_hex(&si->base->oid));
3161 }
3162
3163 return ret;
3164 }
3165
3166 static const int default_max_percent_split_change = 20;
3167
3168 static int too_many_not_shared_entries(struct index_state *istate)
3169 {
3170 int i, not_shared = 0;
3171 int max_split = git_config_get_max_percent_split_change();
3172
3173 switch (max_split) {
3174 case -1:
3175 /* not or badly configured: use the default value */
3176 max_split = default_max_percent_split_change;
3177 break;
3178 case 0:
3179 return 1; /* 0% means always write a new shared index */
3180 case 100:
3181 return 0; /* 100% means never write a new shared index */
3182 default:
3183 break; /* just use the configured value */
3184 }
3185
3186 /* Count the entries that are not shared */
3187 for (i = 0; i < istate->cache_nr; i++) {
3188 struct cache_entry *ce = istate->cache[i];
3189 if (!ce->index)
3190 not_shared++;
3191 }
3192
3193 return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;
3194 }
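/*
 * Worked example of the check above: for a hypothetical index of 1,000
 * entries with the default max_split of 20%, a new shared index is
 * requested once more than 200 entries are not shared with the current
 * base (1000 * 20 < not_shared * 100).
 */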
3195
3196 int write_locked_index(struct index_state *istate, struct lock_file *lock,
3197 unsigned flags)
3198 {
3199 int new_shared_index, ret;
3200 struct split_index *si = istate->split_index;
3201
3202 if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
3203 cache_tree_verify(the_repository, istate);
3204
3205 if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
3206 if (flags & COMMIT_LOCK)
3207 rollback_lock_file(lock);
3208 return 0;
3209 }
3210
3211 if (istate->fsmonitor_last_update)
3212 fill_fsmonitor_bitmap(istate);
3213
3214 if (!si || alternate_index_output ||
3215 (istate->cache_changed & ~EXTMASK)) {
3216 if (si)
3217 oidclr(&si->base_oid);
3218 ret = do_write_locked_index(istate, lock, flags);
3219 goto out;
3220 }
3221
3222 if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
3223 int v = si->base_oid.hash[0];
3224 if ((v & 15) < 6)
3225 istate->cache_changed |= SPLIT_INDEX_ORDERED;
3226 }
3227 if (too_many_not_shared_entries(istate))
3228 istate->cache_changed |= SPLIT_INDEX_ORDERED;
3229
3230 new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
3231
3232 if (new_shared_index) {
3233 struct tempfile *temp;
3234 int saved_errno;
3235
3236 /* Same initial permissions as the main .git/index file */
3237 temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
3238 if (!temp) {
3239 oidclr(&si->base_oid);
3240 ret = do_write_locked_index(istate, lock, flags);
3241 goto out;
3242 }
3243 ret = write_shared_index(istate, &temp);
3244
3245 saved_errno = errno;
3246 if (is_tempfile_active(temp))
3247 delete_tempfile(&temp);
3248 errno = saved_errno;
3249
3250 if (ret)
3251 goto out;
3252 }
3253
3254 ret = write_split_index(istate, lock, flags);
3255
3256 /* Freshen the shared index only if the split-index was written */
3257 if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
3258 const char *shared_index = git_path("sharedindex.%s",
3259 oid_to_hex(&si->base_oid));
3260 freshen_shared_index(shared_index, 1);
3261 }
3262
3263 out:
3264 if (flags & COMMIT_LOCK)
3265 rollback_lock_file(lock);
3266 return ret;
3267 }
3268
3269 /*
3270 * Read the index file that is potentially unmerged into the given
3271 * index_state, dropping any unmerged entries to stage #0 (potentially
3272 * resulting in a path appearing as both a file and a directory in the
3273 * index; the caller is responsible for clearing out the extra entries
3274 * before writing the index to a tree). Returns true if the index is
3275 * unmerged. Callers who want to refuse to work from an unmerged
3276 * state can call this and check its return value, instead of calling
3277 * read_cache().
3278 */
3279 int repo_read_index_unmerged(struct repository *repo)
3280 {
3281 struct index_state *istate;
3282 int i;
3283 int unmerged = 0;
3284
3285 repo_read_index(repo);
3286 istate = repo->index;
3287 for (i = 0; i < istate->cache_nr; i++) {
3288 struct cache_entry *ce = istate->cache[i];
3289 struct cache_entry *new_ce;
3290 int len;
3291
3292 if (!ce_stage(ce))
3293 continue;
3294 unmerged = 1;
3295 len = ce_namelen(ce);
3296 new_ce = make_empty_cache_entry(istate, len);
3297 memcpy(new_ce->name, ce->name, len);
3298 new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
3299 new_ce->ce_namelen = len;
3300 new_ce->ce_mode = ce->ce_mode;
3301 if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
3302 return error(_("%s: cannot drop to stage #0"),
3303 new_ce->name);
3304 }
3305 return unmerged;
3306 }
3307
3308 /*
3309 * Returns 1 if the path is an "other" path with respect to
3310 * the index; that is, the path is not mentioned in the index at all,
3311 * either as a file, a directory with some files in the index,
3312 * or as an unmerged entry.
3313 *
3314 * We helpfully remove a trailing "/" from directories so that
3315 * the output of read_directory can be used as-is.
3316 */
3317 int index_name_is_other(const struct index_state *istate, const char *name,
3318 int namelen)
3319 {
3320 int pos;
3321 if (namelen && name[namelen - 1] == '/')
3322 namelen--;
3323 pos = index_name_pos(istate, name, namelen);
3324 if (0 <= pos)
3325 return 0; /* exact match */
3326 pos = -pos - 1;
3327 if (pos < istate->cache_nr) {
3328 struct cache_entry *ce = istate->cache[pos];
3329 if (ce_namelen(ce) == namelen &&
3330 !memcmp(ce->name, name, namelen))
3331 return 0; /* Yup, this one exists unmerged */
3332 }
3333 return 1;
3334 }
3335
3336 void *read_blob_data_from_index(const struct index_state *istate,
3337 const char *path, unsigned long *size)
3338 {
3339 int pos, len;
3340 unsigned long sz;
3341 enum object_type type;
3342 void *data;
3343
3344 len = strlen(path);
3345 pos = index_name_pos(istate, path, len);
3346 if (pos < 0) {
3347 /*
3348 * We might be in the middle of a merge, in which
3349 * case we would read stage #2 (ours).
3350 */
3351 int i;
3352 for (i = -pos - 1;
3353 (pos < 0 && i < istate->cache_nr &&
3354 !strcmp(istate->cache[i]->name, path));
3355 i++)
3356 if (ce_stage(istate->cache[i]) == 2)
3357 pos = i;
3358 }
3359 if (pos < 0)
3360 return NULL;
3361 data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
3362 if (!data || type != OBJ_BLOB) {
3363 free(data);
3364 return NULL;
3365 }
3366 if (size)
3367 *size = sz;
3368 return data;
3369 }
3370
3371 void stat_validity_clear(struct stat_validity *sv)
3372 {
3373 FREE_AND_NULL(sv->sd);
3374 }
3375
3376 int stat_validity_check(struct stat_validity *sv, const char *path)
3377 {
3378 struct stat st;
3379
3380 if (stat(path, &st) < 0)
3381 return sv->sd == NULL;
3382 if (!sv->sd)
3383 return 0;
3384 return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
3385 }
3386
3387 void stat_validity_update(struct stat_validity *sv, int fd)
3388 {
3389 struct stat st;
3390
3391 if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
3392 stat_validity_clear(sv);
3393 else {
3394 if (!sv->sd)
3395 sv->sd = xcalloc(1, sizeof(struct stat_data));
3396 fill_stat_data(sv->sd, &st);
3397 }
3398 }
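/*
 * A minimal usage sketch for the two helpers above, with a hypothetical
 * caller:
 *
 *   struct stat_validity sv = { NULL };
 *   int fd = open(path, O_RDONLY);
 *   stat_validity_update(&sv, fd);              record the stat data
 *   ...
 *   if (!stat_validity_check(&sv, path))
 *           the path changed since it was recorded; act accordingly
 *
 * stat_validity_check() keeps returning 1 while the recorded stat data
 * still matches the file, or while the path is still missing if nothing
 * was recorded.
 */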
3399
3400 void move_index_extensions(struct index_state *dst, struct index_state *src)
3401 {
3402 dst->untracked = src->untracked;
3403 src->untracked = NULL;
3404 dst->cache_tree = src->cache_tree;
3405 src->cache_tree = NULL;
3406 }
3407
3408 struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
3409 struct index_state *istate)
3410 {
3411 unsigned int size = ce_size(ce);
3412 int mem_pool_allocated;
3413 struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
3414 mem_pool_allocated = new_entry->mem_pool_allocated;
3415
3416 memcpy(new_entry, ce, size);
3417 new_entry->mem_pool_allocated = mem_pool_allocated;
3418 return new_entry;
3419 }
3420
3421 void discard_cache_entry(struct cache_entry *ce)
3422 {
3423 if (ce && should_validate_cache_entries())
3424 memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));
3425
3426 if (ce && ce->mem_pool_allocated)
3427 return;
3428
3429 free(ce);
3430 }
3431
3432 int should_validate_cache_entries(void)
3433 {
3434 static int validate_index_cache_entries = -1;
3435
3436 if (validate_index_cache_entries < 0) {
3437 if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
3438 validate_index_cache_entries = 1;
3439 else
3440 validate_index_cache_entries = 0;
3441 }
3442
3443 return validate_index_cache_entries;
3444 }
3445
3446 #define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
3447 #define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */
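/*
 * Byte-level illustration for a SHA-1 index: the file ends with
 *
 *   "EOIE" <32-bit length = 24> <32-bit offset> <20-byte hash> <20-byte index checksum>
 *
 * i.e. an EOIE_SIZE-byte payload behind the usual 8-byte extension
 * header, which is why read_eoie_extension() below starts scanning
 * EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz bytes before EOF.
 */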
3448
3449 static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
3450 {
3451 /*
3452 * The end of index entries (EOIE) extension is guaranteed to be last
3453 * so that it can be found by scanning backwards from the EOF.
3454 *
3455 * "EOIE"
3456 * <4-byte length>
3457 * <4-byte offset>
3458 * <20-byte hash>
3459 */
3460 const char *index, *eoie;
3461 uint32_t extsize;
3462 size_t offset, src_offset;
3463 unsigned char hash[GIT_MAX_RAWSZ];
3464 git_hash_ctx c;
3465
3466 /* ensure we have an index big enough to contain an EOIE extension */
3467 if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
3468 return 0;
3469
3470 /* validate the extension signature */
3471 index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
3472 if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
3473 return 0;
3474 index += sizeof(uint32_t);
3475
3476 /* validate the extension size */
3477 extsize = get_be32(index);
3478 if (extsize != EOIE_SIZE)
3479 return 0;
3480 index += sizeof(uint32_t);
3481
3482 /*
3483 * Validate that the offset at which we will start looking for the first
3484 * extension signature is after the index header and before the eoie extension.
3485 */
3486 offset = get_be32(index);
3487 if (mmap + offset < mmap + sizeof(struct cache_header))
3488 return 0;
3489 if (mmap + offset >= eoie)
3490 return 0;
3491 index += sizeof(uint32_t);
3492
3493 /*
3494 * The hash is computed over extension types and their sizes (but not
3495 * their contents). E.g. if we have "TREE" extension that is N-bytes
3496 * long, "REUC" extension that is M-bytes long, followed by "EOIE",
3497 * then the hash would be:
3498 *
3499 * SHA-1("TREE" + <binary representation of N> +
3500 * "REUC" + <binary representation of M>)
3501 */
3502 src_offset = offset;
3503 the_hash_algo->init_fn(&c);
3504 while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
3505 /* After an array of active_nr index entries,
3506 * there can be an arbitrary number of extended
3507 * sections, each of which is prefixed with
3508 * an extension name (4 bytes) and the section length
3509 * in 4-byte network byte order.
3510 */
3511 uint32_t extsize;
3512 memcpy(&extsize, mmap + src_offset + 4, 4);
3513 extsize = ntohl(extsize);
3514
3515 /* verify the extension size isn't so large it will wrap around */
3516 if (src_offset + 8 + extsize < src_offset)
3517 return 0;
3518
3519 the_hash_algo->update_fn(&c, mmap + src_offset, 8);
3520
3521 src_offset += 8;
3522 src_offset += extsize;
3523 }
3524 the_hash_algo->final_fn(hash, &c);
3525 if (!hasheq(hash, (const unsigned char *)index))
3526 return 0;
3527
3528 /* Validate that walking the extension offsets brought us back to the eoie extension. */
3529 if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
3530 return 0;
3531
3532 return offset;
3533 }
3534
3535 static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
3536 {
3537 uint32_t buffer;
3538 unsigned char hash[GIT_MAX_RAWSZ];
3539
3540 /* offset */
3541 put_be32(&buffer, offset);
3542 strbuf_add(sb, &buffer, sizeof(uint32_t));
3543
3544 /* hash */
3545 the_hash_algo->final_fn(hash, eoie_context);
3546 strbuf_add(sb, hash, the_hash_algo->rawsz);
3547 }
3548
3549 #define IEOT_VERSION (1)
3550
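/*
 * The IEOT payload is a 32-bit version number followed by one
 * (offset, count) pair of 32-bit values per block of cache entries.
 * A hypothetical two-block table could look like:
 *
 *   version = 1
 *   entries[0] = { offset = 12,    nr = 5000 }
 *   entries[1] = { offset = 61234, nr = 4821 }
 *
 * read_ieot_extension() parses this into a heap-allocated
 * index_entry_offset_table and write_ieot_extension() emits the same
 * layout.
 */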
3551 static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
3552 {
3553 const char *index = NULL;
3554 uint32_t extsize, ext_version;
3555 struct index_entry_offset_table *ieot;
3556 int i, nr;
3557
3558 /* find the IEOT extension */
3559 if (!offset)
3560 return NULL;
3561 while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
3562 extsize = get_be32(mmap + offset + 4);
3563 if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
3564 index = mmap + offset + 4 + 4;
3565 break;
3566 }
3567 offset += 8;
3568 offset += extsize;
3569 }
3570 if (!index)
3571 return NULL;
3572
3573 /* validate the version is IEOT_VERSION */
3574 ext_version = get_be32(index);
3575 if (ext_version != IEOT_VERSION) {
3576 error("invalid IEOT version %d", ext_version);
3577 return NULL;
3578 }
3579 index += sizeof(uint32_t);
3580
3581 /* number of entries = (extension size - version field) / bytes per entry */
3582 nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
3583 if (!nr) {
3584 error("invalid number of IEOT entries %d", nr);
3585 return NULL;
3586 }
3587 ieot = xmalloc(sizeof(struct index_entry_offset_table)
3588 + (nr * sizeof(struct index_entry_offset)));
3589 ieot->nr = nr;
3590 for (i = 0; i < nr; i++) {
3591 ieot->entries[i].offset = get_be32(index);
3592 index += sizeof(uint32_t);
3593 ieot->entries[i].nr = get_be32(index);
3594 index += sizeof(uint32_t);
3595 }
3596
3597 return ieot;
3598 }
3599
3600 static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
3601 {
3602 uint32_t buffer;
3603 int i;
3604
3605 /* version */
3606 put_be32(&buffer, IEOT_VERSION);
3607 strbuf_add(sb, &buffer, sizeof(uint32_t));
3608
3609 /* ieot */
3610 for (i = 0; i < ieot->nr; i++) {
3611
3612 /* offset */
3613 put_be32(&buffer, ieot->entries[i].offset);
3614 strbuf_add(sb, &buffer, sizeof(uint32_t));
3615
3616 /* count */
3617 put_be32(&buffer, ieot->entries[i].nr);
3618 strbuf_add(sb, &buffer, sizeof(uint32_t));
3619 }
3620 }