/*
 * ref-cache.c: in-memory cache of ref entries, shared by ref backends.
 */
1 #include "../git-compat-util.h"
2 #include "../alloc.h"
3 #include "../refs.h"
4 #include "refs-internal.h"
5 #include "ref-cache.h"
6 #include "../iterator.h"
7
8 void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
9 {
10 ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
11 dir->entries[dir->nr++] = entry;
12 /* optimize for the case that entries are added in order */
13 if (dir->nr == 1 ||
14 (dir->nr == dir->sorted + 1 &&
15 strcmp(dir->entries[dir->nr - 2]->name,
16 dir->entries[dir->nr - 1]->name) < 0))
17 dir->sorted = dir->nr;
18 }
19
/*
 * Return the ref_dir stored in a REF_DIR entry, lazily filling it in
 * the first time it is accessed (the REF_INCOMPLETE flag marks a
 * directory whose contents have not been read yet).
 */
struct ref_dir *get_ref_dir(struct ref_entry *entry)
{
	struct ref_dir *dir;
	assert(entry->flag & REF_DIR);
	dir = &entry->u.subdir;
	if (entry->flag & REF_INCOMPLETE) {
		if (!dir->cache->fill_ref_dir)
			BUG("incomplete ref_store without fill_ref_dir function");

		/* Ask the owning ref_store to populate this directory: */
		dir->cache->fill_ref_dir(dir->cache->ref_store, dir, entry->name);
		entry->flag &= ~REF_INCOMPLETE;
	}
	return dir;
}
34
35 struct ref_entry *create_ref_entry(const char *refname,
36 const struct object_id *oid, int flag)
37 {
38 struct ref_entry *ref;
39
40 FLEX_ALLOC_STR(ref, name, refname);
41 oidcpy(&ref->u.value.oid, oid);
42 ref->flag = flag;
43 return ref;
44 }
45
46 struct ref_cache *create_ref_cache(struct ref_store *refs,
47 fill_ref_dir_fn *fill_ref_dir)
48 {
49 struct ref_cache *ret = xcalloc(1, sizeof(*ret));
50
51 ret->ref_store = refs;
52 ret->fill_ref_dir = fill_ref_dir;
53 ret->root = create_dir_entry(ret, "", 0);
54 return ret;
55 }
56
57 static void clear_ref_dir(struct ref_dir *dir);
58
59 static void free_ref_entry(struct ref_entry *entry)
60 {
61 if (entry->flag & REF_DIR) {
62 /*
63 * Do not use get_ref_dir() here, as that might
64 * trigger the reading of loose refs.
65 */
66 clear_ref_dir(&entry->u.subdir);
67 }
68 free(entry);
69 }
70
/*
 * Free cache, including its root directory and (recursively) every
 * cached ref entry.
 */
void free_ref_cache(struct ref_cache *cache)
{
	free_ref_entry(cache->root);
	free(cache);
}
76
77 /*
78 * Clear and free all entries in dir, recursively.
79 */
80 static void clear_ref_dir(struct ref_dir *dir)
81 {
82 int i;
83 for (i = 0; i < dir->nr; i++)
84 free_ref_entry(dir->entries[i]);
85 FREE_AND_NULL(dir->entries);
86 dir->sorted = dir->nr = dir->alloc = 0;
87 }
88
89 struct ref_entry *create_dir_entry(struct ref_cache *cache,
90 const char *dirname, size_t len)
91 {
92 struct ref_entry *direntry;
93
94 FLEX_ALLOC_MEM(direntry, name, dirname, len);
95 direntry->u.subdir.cache = cache;
96 direntry->flag = REF_DIR | REF_INCOMPLETE;
97 return direntry;
98 }
99
100 static int ref_entry_cmp(const void *a, const void *b)
101 {
102 struct ref_entry *one = *(struct ref_entry **)a;
103 struct ref_entry *two = *(struct ref_entry **)b;
104 return strcmp(one->name, two->name);
105 }
106
static void sort_ref_dir(struct ref_dir *dir);

/*
 * A length-delimited view into a string; str need not be
 * NUL-terminated at offset len. Used as the bsearch() key in
 * search_ref_dir() to look up the first len bytes of a refname.
 */
struct string_slice {
	size_t len;
	const char *str;
};
113
/*
 * bsearch() comparison between a string_slice key and a ref_entry.
 * The key matches only an entry whose name is *exactly* key->len
 * bytes long; an entry name that merely starts with the key's bytes
 * compares greater.
 */
static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
{
	const struct string_slice *key = key_;
	const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
	int cmp = strncmp(key->str, ent->name, key->len);
	if (cmp)
		return cmp;
	/* First key->len bytes agree; the shorter (exact) name sorts first: */
	return '\0' - (unsigned char)ent->name[key->len];
}
123
124 int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
125 {
126 struct ref_entry **r;
127 struct string_slice key;
128
129 if (refname == NULL || !dir->nr)
130 return -1;
131
132 sort_ref_dir(dir);
133 key.len = len;
134 key.str = refname;
135 r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
136 ref_entry_cmp_sslice);
137
138 if (!r)
139 return -1;
140
141 return r - dir->entries;
142 }
143
144 /*
145 * Search for a directory entry directly within dir (without
146 * recursing). Sort dir if necessary. subdirname must be a directory
147 * name (i.e., end in '/'). Returns NULL if the desired
148 * directory cannot be found. dir must already be complete.
149 */
150 static struct ref_dir *search_for_subdir(struct ref_dir *dir,
151 const char *subdirname, size_t len)
152 {
153 int entry_index = search_ref_dir(dir, subdirname, len);
154 struct ref_entry *entry;
155
156 if (entry_index == -1)
157 return NULL;
158
159 entry = dir->entries[entry_index];
160 return get_ref_dir(entry);
161 }
162
/*
 * If refname is a reference name, find the ref_dir within the dir
 * tree that should hold refname. If refname is a directory name
 * (i.e., it ends in '/'), then return that ref_dir itself. dir must
 * represent the top-level directory and must already be complete.
 * Sort ref_dirs and recurse into subdirectories as necessary. Will
 * return NULL if the desired directory cannot be found.
 */
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
					   const char *refname)
{
	const char *slash = strchr(refname, '/');

	/* Descend one path component (including its '/') at a time: */
	while (slash) {
		size_t dirnamelen = slash + 1 - refname;

		dir = search_for_subdir(dir, refname, dirnamelen);
		if (!dir)
			return NULL;
		slash = strchr(slash + 1, '/');
	}
	return dir;
}
188
189 struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
190 {
191 int entry_index;
192 struct ref_entry *entry;
193 dir = find_containing_dir(dir, refname);
194 if (!dir)
195 return NULL;
196 entry_index = search_ref_dir(dir, refname, strlen(refname));
197 if (entry_index == -1)
198 return NULL;
199 entry = dir->entries[entry_index];
200 return (entry->flag & REF_DIR) ? NULL : entry;
201 }
202
/*
 * Emit a warning and return true iff ref1 and ref2 have the same name
 * and the same oid. Die if they have the same name but different
 * oids.
 */
static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
{
	if (strcmp(ref1->name, ref2->name))
		return 0;

	/* Duplicate name; make sure that they don't conflict: */

	if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
		/* This is impossible by construction */
		die("Reference directory conflict: %s", ref1->name);

	if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid))
		die("Duplicated ref, and SHA1s don't match: %s", ref1->name);

	/* Same name, same oid: a benign duplicate the caller can drop. */
	warning("Duplicated ref: %s", ref1->name);
	return 1;
}
225
/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
static void sort_ref_dir(struct ref_dir *dir)
{
	int i, j;
	struct ref_entry *last = NULL;

	/*
	 * This check also prevents passing a zero-length array to qsort(),
	 * which is a problem on some platforms.
	 */
	if (dir->sorted == dir->nr)
		return;

	QSORT(dir->entries, dir->nr, ref_entry_cmp);

	/* Remove any duplicates: */
	for (i = 0, j = 0; j < dir->nr; j++) {
		struct ref_entry *entry = dir->entries[j];
		/* is_dup_ref() dies if same-named entries have different oids: */
		if (last && is_dup_ref(last, entry))
			free_ref_entry(entry);
		else
			last = dir->entries[i++] = entry;
	}
	/* The compacted array of i entries is now fully sorted: */
	dir->sorted = dir->nr = i;
}
254
/*
 * How a directory name relates to a refname prefix, as computed by
 * overlaps_prefix().
 */
enum prefix_state {
	/* All refs within the directory would match prefix: */
	PREFIX_CONTAINS_DIR,

	/* Some, but not all, refs within the directory might match prefix: */
	PREFIX_WITHIN_DIR,

	/* No refs within the directory could possibly match prefix: */
	PREFIX_EXCLUDES_DIR
};
265
266 /*
267 * Return a `prefix_state` constant describing the relationship
268 * between the directory with the specified `dirname` and `prefix`.
269 */
270 static enum prefix_state overlaps_prefix(const char *dirname,
271 const char *prefix)
272 {
273 while (*prefix && *dirname == *prefix) {
274 dirname++;
275 prefix++;
276 }
277 if (!*prefix)
278 return PREFIX_CONTAINS_DIR;
279 else if (!*dirname)
280 return PREFIX_WITHIN_DIR;
281 else
282 return PREFIX_EXCLUDES_DIR;
283 }
284
285 /*
286 * Load all of the refs from `dir` (recursively) that could possibly
287 * contain references matching `prefix` into our in-memory cache. If
288 * `prefix` is NULL, prime unconditionally.
289 */
290 static void prime_ref_dir(struct ref_dir *dir, const char *prefix)
291 {
292 /*
293 * The hard work of loading loose refs is done by get_ref_dir(), so we
294 * just need to recurse through all of the sub-directories. We do not
295 * even need to care about sorting, as traversal order does not matter
296 * to us.
297 */
298 int i;
299 for (i = 0; i < dir->nr; i++) {
300 struct ref_entry *entry = dir->entries[i];
301 if (!(entry->flag & REF_DIR)) {
302 /* Not a directory; no need to recurse. */
303 } else if (!prefix) {
304 /* Recurse in any case: */
305 prime_ref_dir(get_ref_dir(entry), NULL);
306 } else {
307 switch (overlaps_prefix(entry->name, prefix)) {
308 case PREFIX_CONTAINS_DIR:
309 /*
310 * Recurse, and from here down we
311 * don't have to check the prefix
312 * anymore:
313 */
314 prime_ref_dir(get_ref_dir(entry), NULL);
315 break;
316 case PREFIX_WITHIN_DIR:
317 prime_ref_dir(get_ref_dir(entry), prefix);
318 break;
319 case PREFIX_EXCLUDES_DIR:
320 /* No need to prime this directory. */
321 break;
322 }
323 }
324 }
325 }
326
/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/*
	 * How this directory relates to the iterator's prefix (see
	 * overlaps_prefix()). If PREFIX_WITHIN_DIR, each entry at
	 * this level must still be checked against the prefix;
	 * otherwise the decision already covers the whole level.
	 */
	enum prefix_state prefix_state;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};
348
/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack. This is always
	 * at least 1, because when it becomes zero the iteration is
	 * ended and this struct is freed.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * Only include references with this prefix in the iteration.
	 * The prefix is matched textually, without regard for path
	 * component boundaries. Owned by the iterator (an xstrdup()'d
	 * copy) and freed in cache_ref_iterator_abort().
	 */
	const char *prefix;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessary the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;

	/*
	 * The repository this iteration was started in; checked by
	 * cache_ref_iterator_peel().
	 */
	struct repository *repo;
};
385
/*
 * Advance to the next reference, descending into subdirectories as
 * they are encountered and popping levels off the stack as they are
 * exhausted. Returns ITER_OK with iter->base filled in for each ref;
 * when the stack empties, ends the iteration via ref_iterator_abort().
 */
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;
		enum prefix_state entry_prefix_state;

		/* index == -1 means this level has not been entered yet: */
		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ref_iterator_abort(ref_iterator);

			continue;
		}

		entry = dir->entries[level->index];

		if (level->prefix_state == PREFIX_WITHIN_DIR) {
			entry_prefix_state = overlaps_prefix(entry->name, iter->prefix);
			if (entry_prefix_state == PREFIX_EXCLUDES_DIR)
				continue;
		} else {
			/* The prefix decision is already settled for this level: */
			entry_prefix_state = level->prefix_state;
		}

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			/*
			 * ALLOC_GROW() may have reallocated iter->levels,
			 * so re-derive the level pointer before use:
			 */
			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->prefix_state = entry_prefix_state;
			level->index = -1;
		} else {
			iter->base.refname = entry->name;
			iter->base.oid = &entry->u.value.oid;
			iter->base.flags = entry->flag;
			return ITER_OK;
		}
	}
}
436
437 static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
438 struct object_id *peeled)
439 {
440 struct cache_ref_iterator *iter =
441 (struct cache_ref_iterator *)ref_iterator;
442
443 if (iter->repo != the_repository)
444 BUG("peeling for non-the_repository is not supported");
445 return peel_object(ref_iterator->oid, peeled) ? -1 : 0;
446 }
447
/*
 * End the iteration, releasing everything the iterator owns.
 */
static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	/* iter->prefix was xstrdup()'d in cache_ref_iterator_begin(): */
	free((char *)iter->prefix);
	free(iter->levels);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}
458
/* Method table wiring the generic ref_iterator to this cache iterator: */
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
	.advance = cache_ref_iterator_advance,
	.peel = cache_ref_iterator_peel,
	.abort = cache_ref_iterator_abort
};
464
/*
 * Begin an iteration over the refs in cache whose names start with
 * prefix (all refs, if prefix is NULL or empty). If prime_dir is
 * true, eagerly load all potentially-matching entries into the cache
 * first. Returns an iterator whose lifetime ends via advance()
 * exhaustion or an explicit abort().
 */
struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
					      const char *prefix,
					      struct repository *repo,
					      int prime_dir)
{
	struct ref_dir *dir;
	struct cache_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	struct cache_ref_iterator_level *level;

	dir = get_ref_dir(cache->root);
	if (prefix && *prefix)
		dir = find_containing_dir(dir, prefix);
	if (!dir)
		/* There's nothing to iterate over. */
		return empty_ref_iterator_begin();

	if (prime_dir)
		prime_ref_dir(dir, prefix);

	CALLOC_ARRAY(iter, 1);
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
	/* Pre-size the level stack; it grows on demand in advance(): */
	ALLOC_GROW(iter->levels, 10, iter->levels_alloc);

	iter->levels_nr = 1;
	level = &iter->levels[0];
	level->index = -1;
	level->dir = dir;

	if (prefix && *prefix) {
		/* The iterator owns this copy; freed in abort(): */
		iter->prefix = xstrdup(prefix);
		level->prefix_state = PREFIX_WITHIN_DIR;
	} else {
		level->prefix_state = PREFIX_CONTAINS_DIR;
	}

	iter->repo = repo;

	return ref_iterator;
}