/*
 * read-cache-ll.h: low-level data structures and declarations for
 * git's index ("directory cache").
 */
1#ifndef READ_CACHE_LL_H
2#define READ_CACHE_LL_H
3
4#include "hash-ll.h"
5#include "hashmap.h"
6#include "statinfo.h"
7
/*
 * Basic data structures for the directory cache
 */

/* First four bytes of the on-disk index file: "DIRC" */
#define CACHE_SIGNATURE 0x44495243	/* "DIRC" */

/* On-disk index file header. */
struct cache_header {
	uint32_t hdr_signature;	/* expected to be CACHE_SIGNATURE */
	uint32_t hdr_version;	/* index format version */
	uint32_t hdr_entries;	/* number of index entries */
};

/* Inclusive lower/upper bounds of supported index format versions */
#define INDEX_FORMAT_LB 2
#define INDEX_FORMAT_UB 4
21
struct cache_entry {
	/* hashmap linkage; deliberately NOT copied by copy_cache_entry() */
	struct hashmap_entry ent;
	/* cached stat(2) data used for up-to-date checks */
	struct stat_data ce_stat_data;
	unsigned int ce_mode;
	/* CE_* flag bits, see below */
	unsigned int ce_flags;
	/* non-zero if this entry was allocated from a mem_pool */
	unsigned int mem_pool_allocated;
	/* length of name[], excluding the terminating NUL */
	unsigned int ce_namelen;
	unsigned int index; /* for link extension */
	struct object_id oid;
	char name[FLEX_ARRAY]; /* more */
};
33
/* On-disk bits of ce_flags (the low 16 bits are stored in the index) */
#define CE_STAGEMASK (0x3000)
#define CE_EXTENDED (0x4000)
#define CE_VALID (0x8000)
#define CE_STAGESHIFT 12

/*
 * Range 0xFFFF0FFF in ce_flags is divided into
 * two parts: in-memory flags and on-disk ones.
 * Flags in CE_EXTENDED_FLAGS will get saved on-disk;
 * if you want to save a new flag, add it in
 * CE_EXTENDED_FLAGS.
 *
 * In-memory only flags
 */
#define CE_UPDATE (1 << 16)
#define CE_REMOVE (1 << 17)
#define CE_UPTODATE (1 << 18)
#define CE_ADDED (1 << 19)

#define CE_HASHED (1 << 20)
#define CE_FSMONITOR_VALID (1 << 21)
#define CE_WT_REMOVE (1 << 22) /* remove in work directory */
#define CE_CONFLICTED (1 << 23)

#define CE_UNPACKED (1 << 24)
#define CE_NEW_SKIP_WORKTREE (1 << 25)

/* used to temporarily mark paths matched by pathspecs */
#define CE_MATCHED (1 << 26)

#define CE_UPDATE_IN_BASE (1 << 27)
#define CE_STRIP_NAME (1 << 28)

/*
 * Extended on-disk flags
 */
#define CE_INTENT_TO_ADD (1 << 29)
#define CE_SKIP_WORKTREE (1 << 30)
/* CE_EXTENDED2 is for future extension */
#define CE_EXTENDED2 (1U << 31)

#define CE_EXTENDED_FLAGS (CE_INTENT_TO_ADD | CE_SKIP_WORKTREE)

/*
 * Safeguard to avoid saving wrong flags:
 *  - CE_EXTENDED2 won't get saved until its semantic is known
 *  - Bits in 0x0000FFFF have been saved in ce_flags already
 *  - Bits in 0x003F0000 are currently in-memory flags
 */
#if CE_EXTENDED_FLAGS & 0x803FFFFF
#error "CE_EXTENDED_FLAGS out of range"
#endif
86
87/* Forward structure decls */
88struct pathspec;
89struct tree;
90
91/*
92 * Copy the sha1 and stat state of a cache entry from one to
93 * another. But we never change the name, or the hash state!
94 */
95static inline void copy_cache_entry(struct cache_entry *dst,
96 const struct cache_entry *src)
97{
98 unsigned int state = dst->ce_flags & CE_HASHED;
99 int mem_pool_allocated = dst->mem_pool_allocated;
100
101 /* Don't copy hash chain and name */
102 memcpy(&dst->ce_stat_data, &src->ce_stat_data,
103 offsetof(struct cache_entry, name) -
104 offsetof(struct cache_entry, ce_stat_data));
105
106 /* Restore the hash state */
107 dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state;
108
109 /* Restore the mem_pool_allocated flag */
110 dst->mem_pool_allocated = mem_pool_allocated;
111}
112
113static inline unsigned create_ce_flags(unsigned stage)
114{
115 return (stage << CE_STAGESHIFT);
116}
117
/* Accessors for cache_entry fields and individual ce_flags bits */
#define ce_namelen(ce) ((ce)->ce_namelen)
#define ce_size(ce) cache_entry_size(ce_namelen(ce))
#define ce_stage(ce) ((CE_STAGEMASK & (ce)->ce_flags) >> CE_STAGESHIFT)
#define ce_uptodate(ce) ((ce)->ce_flags & CE_UPTODATE)
#define ce_skip_worktree(ce) ((ce)->ce_flags & CE_SKIP_WORKTREE)
#define ce_mark_uptodate(ce) ((ce)->ce_flags |= CE_UPTODATE)
#define ce_intent_to_add(ce) ((ce)->ce_flags & CE_INTENT_TO_ADD)

/* Allocation size of a cache_entry whose name is 'len' bytes (plus NUL) */
#define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1)

/* Bits for index_state.cache_changed, recording what kind of change happened */
#define SOMETHING_CHANGED (1 << 0) /* unclassified changes go here */
#define CE_ENTRY_CHANGED (1 << 1)
#define CE_ENTRY_REMOVED (1 << 2)
#define CE_ENTRY_ADDED (1 << 3)
#define RESOLVE_UNDO_CHANGED (1 << 4)
#define CACHE_TREE_CHANGED (1 << 5)
#define SPLIT_INDEX_ORDERED (1 << 6)
#define UNTRACKED_CHANGED (1 << 7)
#define FSMONITOR_CHANGED (1 << 8)
137
138struct split_index;
139struct untracked_cache;
140struct progress;
141struct pattern_list;
142
enum sparse_index_mode {
	/*
	 * There are no sparse directories in the index at all.
	 *
	 * Repositories that don't use cone-mode sparse-checkout will
	 * always have their indexes in this mode.
	 */
	INDEX_EXPANDED = 0,

	/*
	 * The index has already been collapsed to sparse directories
	 * wherever possible.
	 */
	INDEX_COLLAPSED,

	/*
	 * The sparse directories that exist are outside the
	 * sparse-checkout boundary, but it is possible that some file
	 * entries could collapse to sparse directory entries.
	 */
	INDEX_PARTIALLY_SPARSE,
};
165
struct index_state {
	/* array of pointers to cache entries (see index_name_pos() for lookup) */
	struct cache_entry **cache;
	/* index format version */
	unsigned int version;
	/* entries in use, allocated slots, and mask of *_CHANGED bits */
	unsigned int cache_nr, cache_alloc, cache_changed;
	struct string_list *resolve_undo;
	struct cache_tree *cache_tree;
	struct split_index *split_index;
	/* timestamp used by racy-timestamp checks; see is_racy_timestamp() */
	struct cache_time timestamp;
	unsigned name_hash_initialized : 1,
		 initialized : 1,
		 drop_cache_tree : 1,
		 updated_workdir : 1,
		 updated_skipworktree : 1,
		 fsmonitor_has_run_once : 1;
	/* how far the index has been collapsed to sparse directories */
	enum sparse_index_mode sparse_index;
	struct hashmap name_hash;
	struct hashmap dir_hash;
	struct object_id oid;
	struct untracked_cache *untracked;
	char *fsmonitor_last_update;
	struct ewah_bitmap *fsmonitor_dirty;
	/* pool the cache entries may be allocated from; see ce_mem_pool users */
	struct mem_pool *ce_mem_pool;
	struct progress *progress;
	struct repository *repo;
	struct pattern_list *sparse_checkout_patterns;
};
192
/**
 * A "struct index_state istate" must be initialized with
 * INDEX_STATE_INIT or the corresponding index_state_init().
 *
 * If the variable won't be used again, use release_index() to free()
 * its resources. If it needs to be used again use discard_index(),
 * which does the same thing, but will use index_state_init() at
 * the end. The discard_index() will use its own "istate->repo" as the
 * "r" argument to index_state_init() in that case.
 */
#define INDEX_STATE_INIT(r) { \
	.repo = (r), \
}
void index_state_init(struct index_state *istate, struct repository *r);
void release_index(struct index_state *istate);
208
209/* Cache entry creation and cleanup */
210
211/*
212 * Create cache_entry intended for use in the specified index. Caller
213 * is responsible for discarding the cache_entry with
214 * `discard_cache_entry`.
215 */
216struct cache_entry *make_cache_entry(struct index_state *istate,
217 unsigned int mode,
218 const struct object_id *oid,
219 const char *path,
220 int stage,
221 unsigned int refresh_options);
222
223struct cache_entry *make_empty_cache_entry(struct index_state *istate,
224 size_t name_len);
225
226/*
227 * Create a cache_entry that is not intended to be added to an index. If
228 * `ce_mem_pool` is not NULL, the entry is allocated within the given memory
229 * pool. Caller is responsible for discarding "loose" entries with
230 * `discard_cache_entry()` and the memory pool with
231 * `mem_pool_discard(ce_mem_pool, should_validate_cache_entries())`.
232 */
233struct cache_entry *make_transient_cache_entry(unsigned int mode,
234 const struct object_id *oid,
235 const char *path,
236 int stage,
237 struct mem_pool *ce_mem_pool);
238
239struct cache_entry *make_empty_transient_cache_entry(size_t len,
240 struct mem_pool *ce_mem_pool);
241
242/*
243 * Discard cache entry.
244 */
245void discard_cache_entry(struct cache_entry *ce);
246
247/*
248 * Check configuration if we should perform extra validation on cache
249 * entries.
250 */
251int should_validate_cache_entries(void);
252
253/*
254 * Duplicate a cache_entry. Allocate memory for the new entry from a
255 * memory_pool. Takes into account cache_entry fields that are meant
256 * for managing the underlying memory allocation of the cache_entry.
257 */
258struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_state *istate);
259
260/*
261 * Validate the cache entries in the index. This is an internal
262 * consistency check that the cache_entry structs are allocated from
263 * the expected memory pool.
264 */
265void validate_cache_entries(const struct index_state *istate);
266
267/*
268 * Bulk prefetch all missing cache entries that are not GITLINKs and that match
269 * the given predicate. This function should only be called if
270 * repo_has_promisor_remote() returns true.
271 */
272typedef int (*must_prefetch_predicate)(const struct cache_entry *);
273void prefetch_cache_entries(const struct index_state *istate,
274 must_prefetch_predicate must_prefetch);
275
/* Initialize and use the cache information */
struct lock_file;
int do_read_index(struct index_state *istate, const char *path,
		  int must_exist); /* for testing only! */
int read_index_from(struct index_state *istate, const char *path,
		    const char *gitdir);
int is_index_unborn(struct index_state *);
283
284/* For use with `write_locked_index()`. */
285#define COMMIT_LOCK (1 << 0)
286#define SKIP_IF_UNCHANGED (1 << 1)
287
288/*
289 * Write the index while holding an already-taken lock. Close the lock,
290 * and if `COMMIT_LOCK` is given, commit it.
291 *
292 * Unless a split index is in use, write the index into the lockfile.
293 *
294 * With a split index, write the shared index to a temporary file,
295 * adjust its permissions and rename it into place, then write the
296 * split index to the lockfile. If the temporary file for the shared
297 * index cannot be created, fall back to the behavior described in
298 * the previous paragraph.
299 *
300 * With `COMMIT_LOCK`, the lock is always committed or rolled back.
301 * Without it, the lock is closed, but neither committed nor rolled
302 * back.
303 *
304 * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
305 * is written (and the lock is rolled back if `COMMIT_LOCK` is given).
306 */
307int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
308
309void discard_index(struct index_state *);
310void move_index_extensions(struct index_state *dst, struct index_state *src);
311int unmerged_index(const struct index_state *);
312
313/**
314 * Returns 1 if istate differs from tree, 0 otherwise. If tree is NULL,
315 * compares istate to HEAD. If tree is NULL and on an unborn branch,
316 * returns 1 if there are entries in istate, 0 otherwise. If an strbuf is
317 * provided, the space-separated list of files that differ will be appended
318 * to it.
319 */
320int repo_index_has_changes(struct repository *repo,
321 struct tree *tree,
322 struct strbuf *sb);
323
324int verify_path(const char *path, unsigned mode);
325int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
326
327/*
328 * Searches for an entry defined by name and namelen in the given index.
329 * If the return value is positive (including 0) it is the position of an
330 * exact match. If the return value is negative, the negated value minus 1
331 * is the position where the entry would be inserted.
332 * Example: The current index consists of these files and its stages:
333 *
334 * b#0, d#0, f#1, f#3
335 *
336 * index_name_pos(&index, "a", 1) -> -1
337 * index_name_pos(&index, "b", 1) -> 0
338 * index_name_pos(&index, "c", 1) -> -2
339 * index_name_pos(&index, "d", 1) -> 1
340 * index_name_pos(&index, "e", 1) -> -3
341 * index_name_pos(&index, "f", 1) -> -3
342 * index_name_pos(&index, "g", 1) -> -5
343 */
344int index_name_pos(struct index_state *, const char *name, int namelen);
345
346/*
347 * Like index_name_pos, returns the position of an entry of the given name in
348 * the index if one exists, otherwise returns a negative value where the negated
349 * value minus 1 is the position where the index entry would be inserted. Unlike
350 * index_name_pos, however, a sparse index is not expanded to find an entry
351 * inside a sparse directory.
352 */
353int index_name_pos_sparse(struct index_state *, const char *name, int namelen);
354
355/*
356 * Determines whether an entry with the given name exists within the
357 * given index. The return value is 1 if an exact match is found, otherwise
358 * it is 0. Note that, unlike index_name_pos, this function does not expand
359 * the index if it is sparse. If an item exists within the full index but it
360 * is contained within a sparse directory (and not in the sparse index), 0 is
361 * returned.
362 */
363int index_entry_exists(struct index_state *, const char *name, int namelen);
364
/*
 * Some functions return the negative complement of an insert position when a
 * precise match was not found but a position was found where the entry would
 * need to be inserted. This helper protects that logic from any integer
 * underflow.
 */
static inline int index_pos_to_insert_pos(uintmax_t pos)
{
	/* -1 - pos only fits in an int for pos <= INT_MAX; bail out otherwise */
	if (pos > INT_MAX)
		die("overflow: -1 - %"PRIuMAX, pos);
	return -1 - (int)pos;
}
377
378#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
379#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
380#define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
381#define ADD_CACHE_JUST_APPEND 8 /* Append only */
382#define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */
383#define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */
384#define ADD_CACHE_RENORMALIZE 64 /* Pass along HASH_RENORMALIZE */
385int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
386void rename_index_entry_at(struct index_state *, int pos, const char *new_name);
387
388/* Remove entry, return true if there are more entries to go. */
389int remove_index_entry_at(struct index_state *, int pos);
390
391void remove_marked_cache_entries(struct index_state *istate, int invalidate);
392int remove_file_from_index(struct index_state *, const char *path);
/*
 * Flags for add_to_index()/add_file_to_index() below; distinct from the
 * ADD_CACHE_* option bits taken by add_index_entry() above.
 */
#define ADD_CACHE_VERBOSE 1
#define ADD_CACHE_PRETEND 2
#define ADD_CACHE_IGNORE_ERRORS 4
#define ADD_CACHE_IGNORE_REMOVAL 8
#define ADD_CACHE_INTENT 16
398/*
399 * These two are used to add the contents of the file at path
400 * to the index, marking the working tree up-to-date by storing
401 * the cached stat info in the resulting cache entry. A caller
402 * that has already run lstat(2) on the path can call
403 * add_to_index(), and all others can call add_file_to_index();
404 * the latter will do necessary lstat(2) internally before
405 * calling the former.
406 */
407int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
408int add_file_to_index(struct index_state *, const char *path, int flags);
409
410int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
411int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
412void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
413int index_name_is_other(struct index_state *, const char *, int);
414void *read_blob_data_from_index(struct index_state *, const char *, unsigned long *);
415
/*
 * CE_MATCH_* option bits for ie_match_stat()/ie_modified() and
 * refresh_cache_entry(). Written uniformly in lowercase hex: the
 * original mixed octal (01, 02, 04), hex (0x08, 0x10) and a
 * nonstandard uppercase "0X20"; values are unchanged.
 */
/* do stat comparison even if CE_VALID is true */
#define CE_MATCH_IGNORE_VALID 0x01
/* do not check the contents but report dirty on racily-clean entries */
#define CE_MATCH_RACY_IS_DIRTY 0x02
/* do stat comparison even if CE_SKIP_WORKTREE is true */
#define CE_MATCH_IGNORE_SKIP_WORKTREE 0x04
/* ignore non-existent files during stat update */
#define CE_MATCH_IGNORE_MISSING 0x08
/* enable stat refresh */
#define CE_MATCH_REFRESH 0x10
/* don't refresh_fsmonitor state or do stat comparison even if CE_FSMONITOR_VALID is true */
#define CE_MATCH_IGNORE_FSMONITOR 0x20
428int is_racy_timestamp(const struct index_state *istate,
429 const struct cache_entry *ce);
430int has_racy_timestamp(struct index_state *istate);
431int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
432int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
433
434int match_stat_data_racy(const struct index_state *istate,
435 const struct stat_data *sd, struct stat *st);
436
437void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st);
438
439/*
440 * Fill members of st by members of sd enough to convince match_stat()
441 * to consider that they match. It should be usable as a replacement
442 * for lstat() for a tracked path that is known to be up-to-date via
443 * some out-of-line means (like fsmonitor).
444 */
445int fake_lstat(const struct cache_entry *ce, struct stat *st);
446
447#define REFRESH_REALLY (1 << 0) /* ignore_valid */
448#define REFRESH_UNMERGED (1 << 1) /* allow unmerged */
449#define REFRESH_QUIET (1 << 2) /* be quiet about it */
450#define REFRESH_IGNORE_MISSING (1 << 3) /* ignore non-existent */
451#define REFRESH_IGNORE_SUBMODULES (1 << 4) /* ignore submodules */
452#define REFRESH_IN_PORCELAIN (1 << 5) /* user friendly output, not "needs update" */
453#define REFRESH_PROGRESS (1 << 6) /* show progress bar if stderr is tty */
454#define REFRESH_IGNORE_SKIP_WORKTREE (1 << 7) /* ignore skip_worktree entries */
455int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
456/*
457 * Refresh the index and write it to disk.
458 *
459 * 'refresh_flags' is passed directly to 'refresh_index()', while
460 * 'COMMIT_LOCK | write_flags' is passed to 'write_locked_index()', so
461 * the lockfile is always either committed or rolled back.
462 *
463 * If 'gentle' is passed, errors locking the index are ignored.
464 *
465 * Return 1 if refreshing the index returns an error, -1 if writing
466 * the index to disk fails, 0 on success.
467 *
468 * Note that if refreshing the index returns an error, we still write
469 * out the index (unless locking fails).
470 */
471int repo_refresh_and_write_index(struct repository*, unsigned int refresh_flags, unsigned int write_flags, int gentle, const struct pathspec *, char *seen, const char *header_msg);
472
struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);

void set_alternate_index_output(const char *);

/* Knobs enabling extra validation when an index file is read */
extern int verify_index_checksum;
extern int verify_ce_order;

int cmp_cache_name_compare(const void *a_, const void *b_);
int add_files_to_cache(struct repository *repo, const char *prefix,
		       const struct pathspec *pathspec, char *ps_matched,
		       int include_sparse, int flags);
485
486void overlay_tree_on_index(struct index_state *istate,
487 const char *tree_name, const char *prefix);
488
489#endif /* READ_CACHE_LL_H */