]>
Commit | Line | Data |
---|---|---|
e83c5163 LT |
1 | #ifndef CACHE_H |
2 | #define CACHE_H | |
3 | ||
4050c0df | 4 | #include "git-compat-util.h" |
5ecd293d | 5 | #include "strbuf.h" |
e05881a4 | 6 | #include "hashmap.h" |
ac48adf4 | 7 | #include "pathspec.h" |
a64215b6 | 8 | #include "object.h" |
ac48adf4 | 9 | #include "statinfo.h" |
e83c5163 | 10 | |
/*
 * Basic data structures for the directory cache
 */

/* On-disk index files start with this magic number ("DIRC" in ASCII). */
#define CACHE_SIGNATURE 0x44495243

struct cache_header {
	uint32_t hdr_signature;	/* must be CACHE_SIGNATURE */
	uint32_t hdr_version;	/* index format version */
	uint32_t hdr_entries;	/* number of index entries that follow */
};

/* Inclusive range of index format versions this code can read/write. */
#define INDEX_FORMAT_LB 2
#define INDEX_FORMAT_UB 4
e83c5163 | 25 | struct cache_entry { |
8b013788 | 26 | struct hashmap_entry ent; |
c21d39d7 | 27 | struct stat_data ce_stat_data; |
ccc4feb5 | 28 | unsigned int ce_mode; |
7a51ed66 | 29 | unsigned int ce_flags; |
8e72d675 | 30 | unsigned int mem_pool_allocated; |
b60e188c | 31 | unsigned int ce_namelen; |
5fc2fc8f | 32 | unsigned int index; /* for link extension */ |
99d1a986 | 33 | struct object_id oid; |
8f1d2e6f | 34 | char name[FLEX_ARRAY]; /* more */ |
e83c5163 LT |
35 | }; |
36 | ||
#define CE_STAGEMASK (0x3000)
#define CE_EXTENDED  (0x4000)
#define CE_VALID     (0x8000)
#define CE_STAGESHIFT 12

/*
 * Range 0xFFFF0FFF in ce_flags is divided into
 * two parts: in-memory flags and on-disk ones.
 * Flags in CE_EXTENDED_FLAGS will get saved on-disk
 * if you want to save a new flag, add it in
 * CE_EXTENDED_FLAGS
 *
 * In-memory only flags
 */
#define CE_UPDATE            (1 << 16)
#define CE_REMOVE            (1 << 17)
#define CE_UPTODATE          (1 << 18)
#define CE_ADDED             (1 << 19)

#define CE_HASHED            (1 << 20)
#define CE_FSMONITOR_VALID   (1 << 21)
#define CE_WT_REMOVE         (1 << 22) /* remove in work directory */
#define CE_CONFLICTED        (1 << 23)

#define CE_UNPACKED          (1 << 24)
#define CE_NEW_SKIP_WORKTREE (1 << 25)

/* used to temporarily mark paths matched by pathspecs */
#define CE_MATCHED           (1 << 26)

#define CE_UPDATE_IN_BASE    (1 << 27)
#define CE_STRIP_NAME        (1 << 28)

/*
 * Extended on-disk flags
 */
#define CE_INTENT_TO_ADD     (1 << 29)
#define CE_SKIP_WORKTREE     (1 << 30)
/* CE_EXTENDED2 is for future extension */
#define CE_EXTENDED2         (1U << 31)

#define CE_EXTENDED_FLAGS (CE_INTENT_TO_ADD | CE_SKIP_WORKTREE)

/*
 * Safeguard to avoid saving wrong flags:
 *  - CE_EXTENDED2 won't get saved until its semantic is known
 *  - Bits in 0x0000FFFF have been saved in ce_flags already
 *  - Bits in 0x1FFF0000 are currently in-memory flags
 *
 * NOTE: the mask used to be 0x803FFFFF, which only covered in-memory
 * flags up to bit 21; in-memory flags now reach bit 28 (CE_STRIP_NAME),
 * so the guard is widened to cover bits 16-28 as well.
 */
#if CE_EXTENDED_FLAGS & 0x9FFFFFFF
#error "CE_EXTENDED_FLAGS out of range"
#endif

/* Forward structure decls */
struct pathspec;
struct tree;
eb7a2f1d LT |
94 | /* |
95 | * Copy the sha1 and stat state of a cache entry from one to | |
96 | * another. But we never change the name, or the hash state! | |
97 | */ | |
20d142b4 RS |
98 | static inline void copy_cache_entry(struct cache_entry *dst, |
99 | const struct cache_entry *src) | |
eb7a2f1d | 100 | { |
419a597f | 101 | unsigned int state = dst->ce_flags & CE_HASHED; |
8e72d675 | 102 | int mem_pool_allocated = dst->mem_pool_allocated; |
eb7a2f1d LT |
103 | |
104 | /* Don't copy hash chain and name */ | |
8b013788 KB |
105 | memcpy(&dst->ce_stat_data, &src->ce_stat_data, |
106 | offsetof(struct cache_entry, name) - | |
107 | offsetof(struct cache_entry, ce_stat_data)); | |
eb7a2f1d LT |
108 | |
109 | /* Restore the hash state */ | |
419a597f | 110 | dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state; |
8e72d675 JM |
111 | |
112 | /* Restore the mem_pool_allocated flag */ | |
113 | dst->mem_pool_allocated = mem_pool_allocated; | |
eb7a2f1d LT |
114 | } |
115 | ||
b60e188c | 116 | static inline unsigned create_ce_flags(unsigned stage) |
7fec10b7 | 117 | { |
b60e188c | 118 | return (stage << CE_STAGESHIFT); |
7fec10b7 JH |
119 | } |
120 | ||
b60e188c | 121 | #define ce_namelen(ce) ((ce)->ce_namelen) |
aee46198 | 122 | #define ce_size(ce) cache_entry_size(ce_namelen(ce)) |
7a51ed66 | 123 | #define ce_stage(ce) ((CE_STAGEMASK & (ce)->ce_flags) >> CE_STAGESHIFT) |
eadb5831 | 124 | #define ce_uptodate(ce) ((ce)->ce_flags & CE_UPTODATE) |
44a36913 | 125 | #define ce_skip_worktree(ce) ((ce)->ce_flags & CE_SKIP_WORKTREE) |
eadb5831 | 126 | #define ce_mark_uptodate(ce) ((ce)->ce_flags |= CE_UPTODATE) |
895ff3b2 | 127 | #define ce_intent_to_add(ce) ((ce)->ce_flags & CE_INTENT_TO_ADD) |
aee46198 | 128 | |
20d142b4 RS |
129 | static inline unsigned int ce_mode_from_stat(const struct cache_entry *ce, |
130 | unsigned int mode) | |
185c975f | 131 | { |
78a8d641 JS |
132 | extern int trust_executable_bit, has_symlinks; |
133 | if (!has_symlinks && S_ISREG(mode) && | |
7a51ed66 | 134 | ce && S_ISLNK(ce->ce_mode)) |
78a8d641 | 135 | return ce->ce_mode; |
185c975f | 136 | if (!trust_executable_bit && S_ISREG(mode)) { |
7a51ed66 | 137 | if (ce && S_ISREG(ce->ce_mode)) |
185c975f JH |
138 | return ce->ce_mode; |
139 | return create_ce_mode(0666); | |
140 | } | |
141 | return create_ce_mode(mode); | |
142 | } | |
d6b8fc30 JH |
143 | static inline int ce_to_dtype(const struct cache_entry *ce) |
144 | { | |
145 | unsigned ce_mode = ntohl(ce->ce_mode); | |
146 | if (S_ISREG(ce_mode)) | |
147 | return DT_REG; | |
148 | else if (S_ISDIR(ce_mode) || S_ISGITLINK(ce_mode)) | |
149 | return DT_DIR; | |
150 | else if (S_ISLNK(ce_mode)) | |
151 | return DT_LNK; | |
152 | else | |
153 | return DT_UNKNOWN; | |
154 | } | |
e4479470 | 155 | |
ac48adf4 EN |
156 | static inline int ce_path_match(struct index_state *istate, |
157 | const struct cache_entry *ce, | |
158 | const struct pathspec *pathspec, | |
159 | char *seen) | |
160 | { | |
161 | return match_pathspec(istate, pathspec, ce->name, ce_namelen(ce), 0, seen, | |
162 | S_ISDIR(ce->ce_mode) || S_ISGITLINK(ce->ce_mode)); | |
163 | } | |
164 | ||
ee7825b5 | 165 | #define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1) |
f5cabd13 | 166 | |
/* Bits for index_state.cache_changed: what kind of change is pending. */
#define SOMETHING_CHANGED	(1 << 0) /* unclassified changes go here */
#define CE_ENTRY_CHANGED	(1 << 1)
#define CE_ENTRY_REMOVED	(1 << 2)
#define CE_ENTRY_ADDED		(1 << 3)
#define RESOLVE_UNDO_CHANGED	(1 << 4)
#define CACHE_TREE_CHANGED	(1 << 5)
#define SPLIT_INDEX_ORDERED	(1 << 6)
#define UNTRACKED_CHANGED	(1 << 7)
#define FSMONITOR_CHANGED	(1 << 8)

struct split_index;
struct untracked_cache;
struct progress;
struct pattern_list;
enum sparse_index_mode {
	/*
	 * There are no sparse directories in the index at all.
	 *
	 * Repositories that don't use cone-mode sparse-checkout will
	 * always have their indexes in this mode.
	 */
	INDEX_EXPANDED = 0,

	/*
	 * The index has already been collapsed to sparse directories
	 * wherever possible.
	 */
	INDEX_COLLAPSED,

	/*
	 * The sparse directories that exist are outside the
	 * sparse-checkout boundary, but it is possible that some file
	 * entries could collapse to sparse directory entries.
	 */
	INDEX_PARTIALLY_SPARSE,
};
228e94f9 JH |
205 | struct index_state { |
206 | struct cache_entry **cache; | |
9d227781 | 207 | unsigned int version; |
228e94f9 | 208 | unsigned int cache_nr, cache_alloc, cache_changed; |
cfc5789a | 209 | struct string_list *resolve_undo; |
228e94f9 | 210 | struct cache_tree *cache_tree; |
5fc2fc8f | 211 | struct split_index *split_index; |
fba2f38a | 212 | struct cache_time timestamp; |
913e0e99 | 213 | unsigned name_hash_initialized : 1, |
4bddd983 | 214 | initialized : 1, |
1956ecd0 BP |
215 | drop_cache_tree : 1, |
216 | updated_workdir : 1, | |
cfd635c7 | 217 | updated_skipworktree : 1, |
9fadb373 DS |
218 | fsmonitor_has_run_once : 1; |
219 | enum sparse_index_mode sparse_index; | |
8b013788 | 220 | struct hashmap name_hash; |
e05881a4 | 221 | struct hashmap dir_hash; |
75691ea3 | 222 | struct object_id oid; |
83c094ad | 223 | struct untracked_cache *untracked; |
56c69100 | 224 | char *fsmonitor_last_update; |
ba1b9cac | 225 | struct ewah_bitmap *fsmonitor_dirty; |
8e72d675 | 226 | struct mem_pool *ce_mem_pool; |
4dcd4def | 227 | struct progress *progress; |
1fd9ae51 | 228 | struct repository *repo; |
836e25c5 | 229 | struct pattern_list *sparse_checkout_patterns; |
228e94f9 JH |
230 | }; |
231 | ||
/**
 * A "struct index_state istate" must be initialized with
 * INDEX_STATE_INIT or the corresponding index_state_init().
 *
 * If the variable won't be used again, use release_index() to free()
 * its resources. If it needs to be used again use discard_index(),
 * which does the same thing, but will use index_state_init() at
 * the end. The discard_index() will use its own "istate->repo" as the
 * "r" argument to index_state_init() in that case.
 */
#define INDEX_STATE_INIT(r) { \
	.repo = (r), \
}
void index_state_init(struct index_state *istate, struct repository *r);
void release_index(struct index_state *istate);
/* Cache entry creation and cleanup */

/*
 * Create cache_entry intended for use in the specified index. Caller
 * is responsible for discarding the cache_entry with
 * `discard_cache_entry`.
 */
struct cache_entry *make_cache_entry(struct index_state *istate,
				     unsigned int mode,
				     const struct object_id *oid,
				     const char *path,
				     int stage,
				     unsigned int refresh_options);

struct cache_entry *make_empty_cache_entry(struct index_state *istate,
					   size_t name_len);

/*
 * Create a cache_entry that is not intended to be added to an index. If
 * `ce_mem_pool` is not NULL, the entry is allocated within the given memory
 * pool. Caller is responsible for discarding "loose" entries with
 * `discard_cache_entry()` and the memory pool with
 * `mem_pool_discard(ce_mem_pool, should_validate_cache_entries())`.
 */
struct cache_entry *make_transient_cache_entry(unsigned int mode,
					       const struct object_id *oid,
					       const char *path,
					       int stage,
					       struct mem_pool *ce_mem_pool);

struct cache_entry *make_empty_transient_cache_entry(size_t len,
						     struct mem_pool *ce_mem_pool);

/*
 * Discard cache entry.
 */
void discard_cache_entry(struct cache_entry *ce);

/*
 * Check configuration if we should perform extra validation on cache
 * entries.
 */
int should_validate_cache_entries(void);
/*
 * Duplicate a cache_entry. Allocate memory for the new entry from a
 * memory_pool. Takes into account cache_entry fields that are meant
 * for managing the underlying memory allocation of the cache_entry.
 */
struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
				    struct index_state *istate);

/*
 * Validate the cache entries in the index. This is an internal
 * consistency check that the cache_entry structs are allocated from
 * the expected memory pool.
 */
void validate_cache_entries(const struct index_state *istate);

/*
 * Bulk prefetch all missing cache entries that are not GITLINKs and that match
 * the given predicate. This function should only be called if
 * repo_has_promisor_remote() returns true.
 */
typedef int (*must_prefetch_predicate)(const struct cache_entry *);
void prefetch_cache_entries(const struct index_state *istate,
			    must_prefetch_predicate must_prefetch);
#ifdef USE_THE_INDEX_VARIABLE
extern struct index_state the_index;
#endif

/* Initialize and use the cache information */
struct lock_file;
int do_read_index(struct index_state *istate, const char *path,
		  int must_exist); /* for testing only! */
int read_index_from(struct index_state *, const char *path,
		    const char *gitdir);
int is_index_unborn(struct index_state *);

/* For use with `write_locked_index()`. */
#define COMMIT_LOCK		(1 << 0)
#define SKIP_IF_UNCHANGED	(1 << 1)

/*
 * Write the index while holding an already-taken lock. Close the lock,
 * and if `COMMIT_LOCK` is given, commit it.
 *
 * Unless a split index is in use, write the index into the lockfile.
 *
 * With a split index, write the shared index to a temporary file,
 * adjust its permissions and rename it into place, then write the
 * split index to the lockfile. If the temporary file for the shared
 * index cannot be created, fall back to the behavior described in
 * the previous paragraph.
 *
 * With `COMMIT_LOCK`, the lock is always committed or rolled back.
 * Without it, the lock is closed, but neither committed nor rolled
 * back.
 *
 * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
 * is written (and the lock is rolled back if `COMMIT_LOCK` is given).
 */
int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);

void discard_index(struct index_state *);
void move_index_extensions(struct index_state *dst, struct index_state *src);
int unmerged_index(const struct index_state *);
/**
 * Returns 1 if istate differs from tree, 0 otherwise. If tree is NULL,
 * compares istate to HEAD. If tree is NULL and on an unborn branch,
 * returns 1 if there are entries in istate, 0 otherwise. If an strbuf is
 * provided, the space-separated list of files that differ will be appended
 * to it.
 */
int repo_index_has_changes(struct repository *repo,
			   struct tree *tree,
			   struct strbuf *sb);

int verify_path(const char *path, unsigned mode);
int strcmp_offset(const char *s1, const char *s2, size_t *first_change);

/*
 * Searches for an entry defined by name and namelen in the given index.
 * If the return value is positive (including 0) it is the position of an
 * exact match. If the return value is negative, the negated value minus 1
 * is the position where the entry would be inserted.
 * Example: The current index consists of these files and its stages:
 *
 *   b#0, d#0, f#1, f#3
 *
 *   index_name_pos(&index, "a", 1) -> -1
 *   index_name_pos(&index, "b", 1) ->  0
 *   index_name_pos(&index, "c", 1) -> -2
 *   index_name_pos(&index, "d", 1) ->  1
 *   index_name_pos(&index, "e", 1) -> -3
 *   index_name_pos(&index, "f", 1) -> -3
 *   index_name_pos(&index, "g", 1) -> -5
 */
int index_name_pos(struct index_state *, const char *name, int namelen);

/*
 * Like index_name_pos, returns the position of an entry of the given name in
 * the index if one exists, otherwise returns a negative value where the negated
 * value minus 1 is the position where the index entry would be inserted. Unlike
 * index_name_pos, however, a sparse index is not expanded to find an entry
 * inside a sparse directory.
 */
int index_name_pos_sparse(struct index_state *, const char *name, int namelen);

/*
 * Determines whether an entry with the given name exists within the
 * given index. The return value is 1 if an exact match is found, otherwise
 * it is 0. Note that, unlike index_name_pos, this function does not expand
 * the index if it is sparse. If an item exists within the full index but it
 * is contained within a sparse directory (and not in the sparse index), 0 is
 * returned.
 */
int index_entry_exists(struct index_state *, const char *name, int namelen);
/*
 * Some functions return the negative complement of an insert position when a
 * precise match was not found but a position was found where the entry would
 * need to be inserted. This helper protects that logic from any integer
 * underflow.
 */
static inline int index_pos_to_insert_pos(uintmax_t pos)
{
	if (pos > INT_MAX)
		die("overflow: -1 - %"PRIuMAX, pos);
	return -1 - (int)pos;
}
/* Option bits for add_index_entry(). */
#define ADD_CACHE_OK_TO_ADD 1		/* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2	/* Ok to replace file/directory */
#define ADD_CACHE_SKIP_DFCHECK 4	/* Ok to skip DF conflict checks */
#define ADD_CACHE_JUST_APPEND 8		/* Append only */
#define ADD_CACHE_NEW_ONLY 16		/* Do not replace existing ones */
#define ADD_CACHE_KEEP_CACHE_TREE 32	/* Do not invalidate cache-tree */
#define ADD_CACHE_RENORMALIZE 64	/* Pass along HASH_RENORMALIZE */
int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
void rename_index_entry_at(struct index_state *, int pos, const char *new_name);

/* Remove entry, return true if there are more entries to go. */
int remove_index_entry_at(struct index_state *, int pos);

void remove_marked_cache_entries(struct index_state *istate, int invalidate);
int remove_file_from_index(struct index_state *, const char *path);

/* Flag bits for add_to_index() / add_file_to_index(). */
#define ADD_CACHE_VERBOSE 1
#define ADD_CACHE_PRETEND 2
#define ADD_CACHE_IGNORE_ERRORS	4
#define ADD_CACHE_IGNORE_REMOVAL 8
#define ADD_CACHE_INTENT 16
/*
 * These two are used to add the contents of the file at path
 * to the index, marking the working tree up-to-date by storing
 * the cached stat info in the resulting cache entry. A caller
 * that has already run lstat(2) on the path can call
 * add_to_index(), and all others can call add_file_to_index();
 * the latter will do necessary lstat(2) internally before
 * calling the former.
 */
int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
int add_file_to_index(struct index_state *, const char *path, int flags);

int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
int index_name_is_other(struct index_state *, const char *, int);
void *read_blob_data_from_index(struct index_state *, const char *, unsigned long *);
/*
 * CE_MATCH_* option bits for ie_match_stat()/ie_modified() and friends.
 * Literals normalized to lowercase hex: the originals mixed octal
 * (01, 02, 04) with 0x08/0x10 and an uppercase 0X20 — values unchanged.
 */
/* do stat comparison even if CE_VALID is true */
#define CE_MATCH_IGNORE_VALID		0x01
/* do not check the contents but report dirty on racily-clean entries */
#define CE_MATCH_RACY_IS_DIRTY		0x02
/* do stat comparison even if CE_SKIP_WORKTREE is true */
#define CE_MATCH_IGNORE_SKIP_WORKTREE	0x04
/* ignore non-existent files during stat update */
#define CE_MATCH_IGNORE_MISSING		0x08
/* enable stat refresh */
#define CE_MATCH_REFRESH		0x10
/* don't refresh_fsmonitor state or do stat comparison even if CE_FSMONITOR_VALID is true */
#define CE_MATCH_IGNORE_FSMONITOR	0x20
int is_racy_timestamp(const struct index_state *istate,
		      const struct cache_entry *ce);
int has_racy_timestamp(struct index_state *istate);
int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);

int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st);

void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st);
/* Flag bits for refresh_index(). */
#define REFRESH_REALLY			(1 << 0) /* ignore_valid */
#define REFRESH_UNMERGED		(1 << 1) /* allow unmerged */
#define REFRESH_QUIET			(1 << 2) /* be quiet about it */
#define REFRESH_IGNORE_MISSING		(1 << 3) /* ignore non-existent */
#define REFRESH_IGNORE_SUBMODULES	(1 << 4) /* ignore submodules */
#define REFRESH_IN_PORCELAIN		(1 << 5) /* user friendly output, not "needs update" */
#define REFRESH_PROGRESS		(1 << 6) /* show progress bar if stderr is tty */
#define REFRESH_IGNORE_SKIP_WORKTREE	(1 << 7) /* ignore skip_worktree entries */
int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);

/*
 * Refresh the index and write it to disk.
 *
 * 'refresh_flags' is passed directly to 'refresh_index()', while
 * 'COMMIT_LOCK | write_flags' is passed to 'write_locked_index()', so
 * the lockfile is always either committed or rolled back.
 *
 * If 'gentle' is passed, errors locking the index are ignored.
 *
 * Return 1 if refreshing the index returns an error, -1 if writing
 * the index to disk fails, 0 on success.
 *
 * Note that if refreshing the index returns an error, we still write
 * out the index (unless locking fails).
 */
int repo_refresh_and_write_index(struct repository *, unsigned int refresh_flags, unsigned int write_flags, int gentle, const struct pathspec *, char *seen, const char *header_msg);

struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);

void set_alternate_index_output(const char *);

extern int verify_index_checksum;
extern int verify_ce_order;

int cmp_cache_name_compare(const void *a_, const void *b_);
e83c5163 | 516 | |
/* add */
/*
 * return 0 if success, 1 - if addition of a file failed and
 * ADD_FILES_IGNORE_ERRORS was specified in flags
 */
int add_files_to_cache(struct repository *repo, const char *prefix,
		       const struct pathspec *pathspec, int include_sparse,
		       int flags);

/* ls-files */
void overlay_tree_on_index(struct index_state *istate,
			   const char *tree_name, const char *prefix);
e83c5163 | 530 | #endif /* CACHE_H */ |