/*
 * cache.h — Git index ("directory cache") data structures and API.
 */
1 | #ifndef CACHE_H |
2 | #define CACHE_H | |
3 | ||
4050c0df | 4 | #include "git-compat-util.h" |
5ecd293d | 5 | #include "strbuf.h" |
e05881a4 | 6 | #include "hashmap.h" |
ac48adf4 | 7 | #include "pathspec.h" |
a64215b6 | 8 | #include "object.h" |
ac48adf4 | 9 | #include "statinfo.h" |
e83c5163 | 10 | |
/*
 * Basic data structures for the directory cache
 */

#define CACHE_SIGNATURE 0x44495243	/* "DIRC" */
struct cache_header {
	uint32_t hdr_signature;
	uint32_t hdr_version;
	uint32_t hdr_entries;
};

/* Supported on-disk index format versions (inclusive bounds). */
#define INDEX_FORMAT_LB 2
#define INDEX_FORMAT_UB 4

e83c5163 | 25 | struct cache_entry { |
8b013788 | 26 | struct hashmap_entry ent; |
c21d39d7 | 27 | struct stat_data ce_stat_data; |
ccc4feb5 | 28 | unsigned int ce_mode; |
7a51ed66 | 29 | unsigned int ce_flags; |
8e72d675 | 30 | unsigned int mem_pool_allocated; |
b60e188c | 31 | unsigned int ce_namelen; |
5fc2fc8f | 32 | unsigned int index; /* for link extension */ |
99d1a986 | 33 | struct object_id oid; |
8f1d2e6f | 34 | char name[FLEX_ARRAY]; /* more */ |
e83c5163 LT |
35 | }; |
36 | ||
#define CE_STAGEMASK (0x3000)
#define CE_EXTENDED  (0x4000)
#define CE_VALID     (0x8000)
#define CE_STAGESHIFT 12

/*
 * Range 0xFFFF0FFF in ce_flags is divided into
 * two parts: in-memory flags and on-disk ones.
 * Flags in CE_EXTENDED_FLAGS will get saved on-disk
 * if you want to save a new flag, add it in
 * CE_EXTENDED_FLAGS
 *
 * In-memory only flags
 */
#define CE_UPDATE            (1 << 16)
#define CE_REMOVE            (1 << 17)
#define CE_UPTODATE          (1 << 18)
#define CE_ADDED             (1 << 19)

#define CE_HASHED            (1 << 20)
#define CE_FSMONITOR_VALID   (1 << 21)
#define CE_WT_REMOVE         (1 << 22) /* remove in work directory */
#define CE_CONFLICTED        (1 << 23)

#define CE_UNPACKED          (1 << 24)
#define CE_NEW_SKIP_WORKTREE (1 << 25)

/* used to temporarily mark paths matched by pathspecs */
#define CE_MATCHED           (1 << 26)

#define CE_UPDATE_IN_BASE    (1 << 27)
#define CE_STRIP_NAME        (1 << 28)

/*
 * Extended on-disk flags
 */
#define CE_INTENT_TO_ADD     (1 << 29)
#define CE_SKIP_WORKTREE     (1 << 30)
/* CE_EXTENDED2 is for future extension */
#define CE_EXTENDED2         (1U << 31)

#define CE_EXTENDED_FLAGS (CE_INTENT_TO_ADD | CE_SKIP_WORKTREE)

/*
 * Safeguard to avoid saving wrong flags:
 *  - CE_EXTENDED2 won't get saved until its semantic is known
 *  - Bits in 0x0000FFFF have been saved in ce_flags already
 *  - Bits in 0x003F0000 are currently in-memory flags
 */
#if CE_EXTENDED_FLAGS & 0x803FFFFF
#error "CE_EXTENDED_FLAGS out of range"
#endif

/* Forward structure decls */
struct pathspec;
struct tree;

eb7a2f1d LT |
94 | /* |
95 | * Copy the sha1 and stat state of a cache entry from one to | |
96 | * another. But we never change the name, or the hash state! | |
97 | */ | |
20d142b4 RS |
98 | static inline void copy_cache_entry(struct cache_entry *dst, |
99 | const struct cache_entry *src) | |
eb7a2f1d | 100 | { |
419a597f | 101 | unsigned int state = dst->ce_flags & CE_HASHED; |
8e72d675 | 102 | int mem_pool_allocated = dst->mem_pool_allocated; |
eb7a2f1d LT |
103 | |
104 | /* Don't copy hash chain and name */ | |
8b013788 KB |
105 | memcpy(&dst->ce_stat_data, &src->ce_stat_data, |
106 | offsetof(struct cache_entry, name) - | |
107 | offsetof(struct cache_entry, ce_stat_data)); | |
eb7a2f1d LT |
108 | |
109 | /* Restore the hash state */ | |
419a597f | 110 | dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state; |
8e72d675 JM |
111 | |
112 | /* Restore the mem_pool_allocated flag */ | |
113 | dst->mem_pool_allocated = mem_pool_allocated; | |
eb7a2f1d LT |
114 | } |
115 | ||
b60e188c | 116 | static inline unsigned create_ce_flags(unsigned stage) |
7fec10b7 | 117 | { |
b60e188c | 118 | return (stage << CE_STAGESHIFT); |
7fec10b7 JH |
119 | } |
120 | ||
/* Accessor macros for common cache_entry queries. */
#define ce_namelen(ce) ((ce)->ce_namelen)
#define ce_size(ce) cache_entry_size(ce_namelen(ce))
#define ce_stage(ce) ((CE_STAGEMASK & (ce)->ce_flags) >> CE_STAGESHIFT)
#define ce_uptodate(ce) ((ce)->ce_flags & CE_UPTODATE)
#define ce_skip_worktree(ce) ((ce)->ce_flags & CE_SKIP_WORKTREE)
#define ce_mark_uptodate(ce) ((ce)->ce_flags |= CE_UPTODATE)
#define ce_intent_to_add(ce) ((ce)->ce_flags & CE_INTENT_TO_ADD)

20d142b4 RS |
129 | static inline unsigned int ce_mode_from_stat(const struct cache_entry *ce, |
130 | unsigned int mode) | |
185c975f | 131 | { |
78a8d641 JS |
132 | extern int trust_executable_bit, has_symlinks; |
133 | if (!has_symlinks && S_ISREG(mode) && | |
7a51ed66 | 134 | ce && S_ISLNK(ce->ce_mode)) |
78a8d641 | 135 | return ce->ce_mode; |
185c975f | 136 | if (!trust_executable_bit && S_ISREG(mode)) { |
7a51ed66 | 137 | if (ce && S_ISREG(ce->ce_mode)) |
185c975f JH |
138 | return ce->ce_mode; |
139 | return create_ce_mode(0666); | |
140 | } | |
141 | return create_ce_mode(mode); | |
142 | } | |
d6b8fc30 JH |
143 | static inline int ce_to_dtype(const struct cache_entry *ce) |
144 | { | |
145 | unsigned ce_mode = ntohl(ce->ce_mode); | |
146 | if (S_ISREG(ce_mode)) | |
147 | return DT_REG; | |
148 | else if (S_ISDIR(ce_mode) || S_ISGITLINK(ce_mode)) | |
149 | return DT_DIR; | |
150 | else if (S_ISLNK(ce_mode)) | |
151 | return DT_LNK; | |
152 | else | |
153 | return DT_UNKNOWN; | |
154 | } | |
e4479470 | 155 | |
ac48adf4 EN |
156 | static inline int ce_path_match(struct index_state *istate, |
157 | const struct cache_entry *ce, | |
158 | const struct pathspec *pathspec, | |
159 | char *seen) | |
160 | { | |
161 | return match_pathspec(istate, pathspec, ce->name, ce_namelen(ce), 0, seen, | |
162 | S_ISDIR(ce->ce_mode) || S_ISGITLINK(ce->ce_mode)); | |
163 | } | |
164 | ||
ee7825b5 | 165 | #define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1) |
f5cabd13 | 166 | |
/* Bits for index_state.cache_changed: what kind of change is pending. */
#define SOMETHING_CHANGED	(1 << 0) /* unclassified changes go here */
#define CE_ENTRY_CHANGED	(1 << 1)
#define CE_ENTRY_REMOVED	(1 << 2)
#define CE_ENTRY_ADDED		(1 << 3)
#define RESOLVE_UNDO_CHANGED	(1 << 4)
#define CACHE_TREE_CHANGED	(1 << 5)
#define SPLIT_INDEX_ORDERED	(1 << 6)
#define UNTRACKED_CHANGED	(1 << 7)
#define FSMONITOR_CHANGED	(1 << 8)

struct split_index;
struct untracked_cache;
struct progress;
struct pattern_list;

enum sparse_index_mode {
	/*
	 * There are no sparse directories in the index at all.
	 *
	 * Repositories that don't use cone-mode sparse-checkout will
	 * always have their indexes in this mode.
	 */
	INDEX_EXPANDED = 0,

	/*
	 * The index has already been collapsed to sparse directories
	 * wherever possible.
	 */
	INDEX_COLLAPSED,

	/*
	 * The sparse directories that exist are outside the
	 * sparse-checkout boundary, but it is possible that some file
	 * entries could collapse to sparse directory entries.
	 */
	INDEX_PARTIALLY_SPARSE,
};

228e94f9 JH |
205 | struct index_state { |
206 | struct cache_entry **cache; | |
9d227781 | 207 | unsigned int version; |
228e94f9 | 208 | unsigned int cache_nr, cache_alloc, cache_changed; |
cfc5789a | 209 | struct string_list *resolve_undo; |
228e94f9 | 210 | struct cache_tree *cache_tree; |
5fc2fc8f | 211 | struct split_index *split_index; |
fba2f38a | 212 | struct cache_time timestamp; |
913e0e99 | 213 | unsigned name_hash_initialized : 1, |
4bddd983 | 214 | initialized : 1, |
1956ecd0 BP |
215 | drop_cache_tree : 1, |
216 | updated_workdir : 1, | |
cfd635c7 | 217 | updated_skipworktree : 1, |
9fadb373 DS |
218 | fsmonitor_has_run_once : 1; |
219 | enum sparse_index_mode sparse_index; | |
8b013788 | 220 | struct hashmap name_hash; |
e05881a4 | 221 | struct hashmap dir_hash; |
75691ea3 | 222 | struct object_id oid; |
83c094ad | 223 | struct untracked_cache *untracked; |
56c69100 | 224 | char *fsmonitor_last_update; |
ba1b9cac | 225 | struct ewah_bitmap *fsmonitor_dirty; |
8e72d675 | 226 | struct mem_pool *ce_mem_pool; |
4dcd4def | 227 | struct progress *progress; |
1fd9ae51 | 228 | struct repository *repo; |
836e25c5 | 229 | struct pattern_list *sparse_checkout_patterns; |
228e94f9 JH |
230 | }; |
231 | ||
/**
 * A "struct index_state istate" must be initialized with
 * INDEX_STATE_INIT or the corresponding index_state_init().
 *
 * If the variable won't be used again, use release_index() to free()
 * its resources. If it needs to be used again use discard_index(),
 * which does the same thing, but will use index_state_init() at
 * the end. The discard_index() will use its own "istate->repo" as the
 * "r" argument to index_state_init() in that case.
 */
#define INDEX_STATE_INIT(r) { \
	.repo = (r), \
}
void index_state_init(struct index_state *istate, struct repository *r);
void release_index(struct index_state *istate);

/* Name hashing */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded);
void add_name_hash(struct index_state *istate, struct cache_entry *ce);
void remove_name_hash(struct index_state *istate, struct cache_entry *ce);
void free_name_hash(struct index_state *istate);

/* Cache entry creation and cleanup */

/*
 * Create cache_entry intended for use in the specified index. Caller
 * is responsible for discarding the cache_entry with
 * `discard_cache_entry`.
 */
struct cache_entry *make_cache_entry(struct index_state *istate,
				     unsigned int mode,
				     const struct object_id *oid,
				     const char *path,
				     int stage,
				     unsigned int refresh_options);

struct cache_entry *make_empty_cache_entry(struct index_state *istate,
					   size_t name_len);

/*
 * Create a cache_entry that is not intended to be added to an index. If
 * `ce_mem_pool` is not NULL, the entry is allocated within the given memory
 * pool. Caller is responsible for discarding "loose" entries with
 * `discard_cache_entry()` and the memory pool with
 * `mem_pool_discard(ce_mem_pool, should_validate_cache_entries())`.
 */
struct cache_entry *make_transient_cache_entry(unsigned int mode,
					       const struct object_id *oid,
					       const char *path,
					       int stage,
					       struct mem_pool *ce_mem_pool);

struct cache_entry *make_empty_transient_cache_entry(size_t len,
						     struct mem_pool *ce_mem_pool);

/*
 * Discard cache entry.
 */
void discard_cache_entry(struct cache_entry *ce);

/*
 * Check configuration if we should perform extra validation on cache
 * entries.
 */
int should_validate_cache_entries(void);

/*
 * Duplicate a cache_entry. Allocate memory for the new entry from a
 * memory_pool. Takes into account cache_entry fields that are meant
 * for managing the underlying memory allocation of the cache_entry.
 */
struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_state *istate);

/*
 * Validate the cache entries in the index.  This is an internal
 * consistency check that the cache_entry structs are allocated from
 * the expected memory pool.
 */
void validate_cache_entries(const struct index_state *istate);

/*
 * Bulk prefetch all missing cache entries that are not GITLINKs and that match
 * the given predicate. This function should only be called if
 * repo_has_promisor_remote() returns true.
 */
typedef int (*must_prefetch_predicate)(const struct cache_entry *);
void prefetch_cache_entries(const struct index_state *istate,
			    must_prefetch_predicate must_prefetch);

#ifdef USE_THE_INDEX_VARIABLE
extern struct index_state the_index;
#endif

#define INIT_DB_QUIET 0x0001
#define INIT_DB_EXIST_OK 0x0002

int init_db(const char *git_dir, const char *real_git_dir,
	    const char *template_dir, int hash_algo,
	    const char *initial_branch, int init_shared_repository,
	    unsigned int flags);
void initialize_repository_version(int hash_algo, int reinit);

/* Initialize and use the cache information */
struct lock_file;
void preload_index(struct index_state *index,
		   const struct pathspec *pathspec,
		   unsigned int refresh_flags);
int do_read_index(struct index_state *istate, const char *path,
		  int must_exist); /* for testing only! */
int read_index_from(struct index_state *, const char *path,
		    const char *gitdir);
int is_index_unborn(struct index_state *);

/* Expand any sparse-directory entries into their full file listings. */
void ensure_full_index(struct index_state *istate);

/* For use with `write_locked_index()`. */
#define COMMIT_LOCK		(1 << 0)
#define SKIP_IF_UNCHANGED	(1 << 1)

/*
 * Write the index while holding an already-taken lock. Close the lock,
 * and if `COMMIT_LOCK` is given, commit it.
 *
 * Unless a split index is in use, write the index into the lockfile.
 *
 * With a split index, write the shared index to a temporary file,
 * adjust its permissions and rename it into place, then write the
 * split index to the lockfile. If the temporary file for the shared
 * index cannot be created, fall back to the behavior described in
 * the previous paragraph.
 *
 * With `COMMIT_LOCK`, the lock is always committed or rolled back.
 * Without it, the lock is closed, but neither committed nor rolled
 * back.
 *
 * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
 * is written (and the lock is rolled back if `COMMIT_LOCK` is given).
 */
int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);

void discard_index(struct index_state *);
void move_index_extensions(struct index_state *dst, struct index_state *src);
int unmerged_index(const struct index_state *);

/**
 * Returns 1 if istate differs from tree, 0 otherwise.  If tree is NULL,
 * compares istate to HEAD.  If tree is NULL and on an unborn branch,
 * returns 1 if there are entries in istate, 0 otherwise.  If an strbuf is
 * provided, the space-separated list of files that differ will be appended
 * to it.
 */
int repo_index_has_changes(struct repository *repo,
			   struct tree *tree,
			   struct strbuf *sb);

int verify_path(const char *path, unsigned mode);
int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
int index_dir_exists(struct index_state *istate, const char *name, int namelen);
void adjust_dirname_case(struct index_state *istate, char *name);
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int igncase);

/*
 * Searches for an entry defined by name and namelen in the given index.
 * If the return value is positive (including 0) it is the position of an
 * exact match. If the return value is negative, the negated value minus 1
 * is the position where the entry would be inserted.
 * Example: The current index consists of these files and its stages:
 *
 *   b#0, d#0, f#1, f#3
 *
 * index_name_pos(&index, "a", 1) -> -1
 * index_name_pos(&index, "b", 1) ->  0
 * index_name_pos(&index, "c", 1) -> -2
 * index_name_pos(&index, "d", 1) ->  1
 * index_name_pos(&index, "e", 1) -> -3
 * index_name_pos(&index, "f", 1) -> -3
 * index_name_pos(&index, "g", 1) -> -5
 */
int index_name_pos(struct index_state *, const char *name, int namelen);

/*
 * Like index_name_pos, returns the position of an entry of the given name in
 * the index if one exists, otherwise returns a negative value where the negated
 * value minus 1 is the position where the index entry would be inserted. Unlike
 * index_name_pos, however, a sparse index is not expanded to find an entry
 * inside a sparse directory.
 */
int index_name_pos_sparse(struct index_state *, const char *name, int namelen);

/*
 * Determines whether an entry with the given name exists within the
 * given index. The return value is 1 if an exact match is found, otherwise
 * it is 0. Note that, unlike index_name_pos, this function does not expand
 * the index if it is sparse. If an item exists within the full index but it
 * is contained within a sparse directory (and not in the sparse index), 0 is
 * returned.
 */
int index_entry_exists(struct index_state *, const char *name, int namelen);

/*
 * Some functions return the negative complement of an insert position when a
 * precise match was not found but a position was found where the entry would
 * need to be inserted. This helper protects that logic from any integer
 * underflow.
 */
static inline int index_pos_to_insert_pos(uintmax_t pos)
{
	if (pos > INT_MAX)
		die("overflow: -1 - %"PRIuMAX, pos);
	return -1 - (int)pos;
}

#define ADD_CACHE_OK_TO_ADD 1		/* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2	/* Ok to replace file/directory */
#define ADD_CACHE_SKIP_DFCHECK 4	/* Ok to skip DF conflict checks */
#define ADD_CACHE_JUST_APPEND 8		/* Append only */
#define ADD_CACHE_NEW_ONLY 16		/* Do not replace existing ones */
#define ADD_CACHE_KEEP_CACHE_TREE 32	/* Do not invalidate cache-tree */
#define ADD_CACHE_RENORMALIZE 64	/* Pass along HASH_RENORMALIZE */
int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
void rename_index_entry_at(struct index_state *, int pos, const char *new_name);

/* Remove entry, return true if there are more entries to go. */
int remove_index_entry_at(struct index_state *, int pos);

void remove_marked_cache_entries(struct index_state *istate, int invalidate);
int remove_file_from_index(struct index_state *, const char *path);
#define ADD_CACHE_VERBOSE 1
#define ADD_CACHE_PRETEND 2
#define ADD_CACHE_IGNORE_ERRORS	4
#define ADD_CACHE_IGNORE_REMOVAL 8
#define ADD_CACHE_INTENT 16
/*
 * These two are used to add the contents of the file at path
 * to the index, marking the working tree up-to-date by storing
 * the cached stat info in the resulting cache entry.  A caller
 * that has already run lstat(2) on the path can call
 * add_to_index(), and all others can call add_file_to_index();
 * the latter will do necessary lstat(2) internally before
 * calling the former.
 */
int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
int add_file_to_index(struct index_state *, const char *path, int flags);

int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
int index_name_is_other(struct index_state *, const char *, int);
void *read_blob_data_from_index(struct index_state *, const char *, unsigned long *);

/* do stat comparison even if CE_VALID is true */
#define CE_MATCH_IGNORE_VALID		01
/* do not check the contents but report dirty on racily-clean entries */
#define CE_MATCH_RACY_IS_DIRTY		02
/* do stat comparison even if CE_SKIP_WORKTREE is true */
#define CE_MATCH_IGNORE_SKIP_WORKTREE	04
/* ignore non-existent files during stat update  */
#define CE_MATCH_IGNORE_MISSING		0x08
/* enable stat refresh */
#define CE_MATCH_REFRESH		0x10
/* don't refresh_fsmonitor state or do stat comparison even if CE_FSMONITOR_VALID is true */
#define CE_MATCH_IGNORE_FSMONITOR	0x20
int is_racy_timestamp(const struct index_state *istate,
		      const struct cache_entry *ce);
int has_racy_timestamp(struct index_state *istate);
int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);

/*
 * Record to sd the data from st that we use to check whether a file
 * might have changed.
 */
void fill_stat_data(struct stat_data *sd, struct stat *st);

/*
 * Return 0 if st is consistent with a file not having been changed
 * since sd was filled.  If there are differences, return a
 * combination of MTIME_CHANGED, CTIME_CHANGED, OWNER_CHANGED,
 * INODE_CHANGED, and DATA_CHANGED.
 */
int match_stat_data(const struct stat_data *sd, struct stat *st);
int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st);

void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st);

#define REFRESH_REALLY                   (1 << 0) /* ignore_valid */
#define REFRESH_UNMERGED                 (1 << 1) /* allow unmerged */
#define REFRESH_QUIET                    (1 << 2) /* be quiet about it */
#define REFRESH_IGNORE_MISSING           (1 << 3) /* ignore non-existent */
#define REFRESH_IGNORE_SUBMODULES        (1 << 4) /* ignore submodules */
#define REFRESH_IN_PORCELAIN             (1 << 5) /* user friendly output, not "needs update" */
#define REFRESH_PROGRESS                 (1 << 6) /* show progress bar if stderr is tty */
#define REFRESH_IGNORE_SKIP_WORKTREE     (1 << 7) /* ignore skip_worktree entries */
int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
/*
 * Refresh the index and write it to disk.
 *
 * 'refresh_flags' is passed directly to 'refresh_index()', while
 * 'COMMIT_LOCK | write_flags' is passed to 'write_locked_index()', so
 * the lockfile is always either committed or rolled back.
 *
 * If 'gentle' is passed, errors locking the index are ignored.
 *
 * Return 1 if refreshing the index returns an error, -1 if writing
 * the index to disk fails, 0 on success.
 *
 * Note that if refreshing the index returns an error, we still write
 * out the index (unless locking fails).
 */
int repo_refresh_and_write_index(struct repository*, unsigned int refresh_flags, unsigned int write_flags, int gentle, const struct pathspec *, char *seen, const char *header_msg);

struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);

void set_alternate_index_output(const char *);

extern int verify_index_checksum;
extern int verify_ce_order;

#define MTIME_CHANGED	0x0001
#define CTIME_CHANGED	0x0002
#define OWNER_CHANGED	0x0004
#define MODE_CHANGED    0x0008
#define INODE_CHANGED   0x0010
#define DATA_CHANGED    0x0020
#define TYPE_CHANGED    0x0040

int cmp_cache_name_compare(const void *a_, const void *b_);

/* add */
/*
 * return 0 if success, 1 - if addition of a file failed and
 * ADD_FILES_IGNORE_ERRORS was specified in flags
 */
int add_files_to_cache(const char *prefix, const struct pathspec *pathspec, int flags);

/* diff.c */
extern int diff_auto_refresh_index;

/* ls-files */
void overlay_tree_on_index(struct index_state *istate,
			   const char *tree_name, const char *prefix);

/* merge.c */
struct commit_list;
int try_merge_command(struct repository *r,
		      const char *strategy, size_t xopts_nr,
		      const char **xopts, struct commit_list *common,
		      const char *head_arg, struct commit_list *remotes);
int checkout_fast_forward(struct repository *r,
			  const struct object_id *from,
			  const struct object_id *to,
			  int overwrite_ignore);


int sane_execvp(const char *file, char *const argv[]);

38612532 MH |
589 | /* |
590 | * A struct to encapsulate the concept of whether a file has changed | |
591 | * since we last checked it. This uses criteria similar to those used | |
592 | * for the index. | |
593 | */ | |
594 | struct stat_validity { | |
595 | struct stat_data *sd; | |
596 | }; | |
597 | ||
598 | void stat_validity_clear(struct stat_validity *sv); | |
599 | ||
600 | /* | |
601 | * Returns 1 if the path is a regular file (or a symlink to a regular | |
602 | * file) and matches the saved stat_validity, 0 otherwise. A missing | |
603 | * or inaccessible file is considered a match if the struct was just | |
604 | * initialized, or if the previous update found an inaccessible file. | |
605 | */ | |
606 | int stat_validity_check(struct stat_validity *sv, const char *path); | |
607 | ||
608 | /* | |
609 | * Update the stat_validity from a file opened at descriptor fd. If | |
610 | * the file is missing, inaccessible, or not a regular file, then | |
611 | * future calls to stat_validity_check will match iff one of those | |
612 | * conditions continues to be true. | |
613 | */ | |
614 | void stat_validity_update(struct stat_validity *sv, int fd); | |
615 | ||
e83c5163 | 616 | #endif /* CACHE_H */ |