/*
 * refs/ref-cache.c - in-memory cache of a reference hierarchy.
 */
1 #include "../cache.h"
2 #include "../refs.h"
3 #include "refs-internal.h"
4 #include "ref-cache.h"
5 #include "../iterator.h"
6
7 void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
8 {
9 ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
10 dir->entries[dir->nr++] = entry;
11 /* optimize for the case that entries are added in order */
12 if (dir->nr == 1 ||
13 (dir->nr == dir->sorted + 1 &&
14 strcmp(dir->entries[dir->nr - 2]->name,
15 dir->entries[dir->nr - 1]->name) < 0))
16 dir->sorted = dir->nr;
17 }
18
19 struct ref_dir *get_ref_dir(struct ref_entry *entry)
20 {
21 struct ref_dir *dir;
22 assert(entry->flag & REF_DIR);
23 dir = &entry->u.subdir;
24 if (entry->flag & REF_INCOMPLETE) {
25 if (!dir->cache->fill_ref_dir)
26 BUG("incomplete ref_store without fill_ref_dir function");
27
28 dir->cache->fill_ref_dir(dir->cache->ref_store, dir, entry->name);
29 entry->flag &= ~REF_INCOMPLETE;
30 }
31 return dir;
32 }
33
34 struct ref_entry *create_ref_entry(const char *refname,
35 const struct object_id *oid, int flag)
36 {
37 struct ref_entry *ref;
38
39 FLEX_ALLOC_STR(ref, name, refname);
40 oidcpy(&ref->u.value.oid, oid);
41 ref->flag = flag;
42 return ref;
43 }
44
45 struct ref_cache *create_ref_cache(struct ref_store *refs,
46 fill_ref_dir_fn *fill_ref_dir)
47 {
48 struct ref_cache *ret = xcalloc(1, sizeof(*ret));
49
50 ret->ref_store = refs;
51 ret->fill_ref_dir = fill_ref_dir;
52 ret->root = create_dir_entry(ret, "", 0);
53 return ret;
54 }
55
56 static void clear_ref_dir(struct ref_dir *dir);
57
58 static void free_ref_entry(struct ref_entry *entry)
59 {
60 if (entry->flag & REF_DIR) {
61 /*
62 * Do not use get_ref_dir() here, as that might
63 * trigger the reading of loose refs.
64 */
65 clear_ref_dir(&entry->u.subdir);
66 }
67 free(entry);
68 }
69
/* Free `cache`, including its entire tree of entries. */
void free_ref_cache(struct ref_cache *cache)
{
	free_ref_entry(cache->root);
	free(cache);
}
75
76 /*
77 * Clear and free all entries in dir, recursively.
78 */
79 static void clear_ref_dir(struct ref_dir *dir)
80 {
81 int i;
82 for (i = 0; i < dir->nr; i++)
83 free_ref_entry(dir->entries[i]);
84 FREE_AND_NULL(dir->entries);
85 dir->sorted = dir->nr = dir->alloc = 0;
86 }
87
88 struct ref_entry *create_dir_entry(struct ref_cache *cache,
89 const char *dirname, size_t len)
90 {
91 struct ref_entry *direntry;
92
93 FLEX_ALLOC_MEM(direntry, name, dirname, len);
94 direntry->u.subdir.cache = cache;
95 direntry->flag = REF_DIR | REF_INCOMPLETE;
96 return direntry;
97 }
98
99 static int ref_entry_cmp(const void *a, const void *b)
100 {
101 struct ref_entry *one = *(struct ref_entry **)a;
102 struct ref_entry *two = *(struct ref_entry **)b;
103 return strcmp(one->name, two->name);
104 }
105
106 static void sort_ref_dir(struct ref_dir *dir);
107
/* A substring: the `len` bytes starting at `str` (not NUL-terminated). */
struct string_slice {
	size_t len;
	const char *str;
};
112
113 static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
114 {
115 const struct string_slice *key = key_;
116 const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
117 int cmp = strncmp(key->str, ent->name, key->len);
118 if (cmp)
119 return cmp;
120 return '\0' - (unsigned char)ent->name[key->len];
121 }
122
123 int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
124 {
125 struct ref_entry **r;
126 struct string_slice key;
127
128 if (refname == NULL || !dir->nr)
129 return -1;
130
131 sort_ref_dir(dir);
132 key.len = len;
133 key.str = refname;
134 r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
135 ref_entry_cmp_sslice);
136
137 if (!r)
138 return -1;
139
140 return r - dir->entries;
141 }
142
143 /*
144 * Search for a directory entry directly within dir (without
145 * recursing). Sort dir if necessary. subdirname must be a directory
146 * name (i.e., end in '/'). Returns NULL if the desired
147 * directory cannot be found. dir must already be complete.
148 */
149 static struct ref_dir *search_for_subdir(struct ref_dir *dir,
150 const char *subdirname, size_t len)
151 {
152 int entry_index = search_ref_dir(dir, subdirname, len);
153 struct ref_entry *entry;
154
155 if (entry_index == -1)
156 return NULL;
157
158 entry = dir->entries[entry_index];
159 return get_ref_dir(entry);
160 }
161
/*
 * If refname is a reference name, find the ref_dir within the dir
 * tree that should hold refname. If refname is a directory name
 * (i.e., it ends in '/'), then return that ref_dir itself. dir must
 * represent the top-level directory and must already be complete.
 * Sort ref_dirs and recurse into subdirectories as necessary. Will
 * return NULL if the desired directory cannot be found.
 */
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
					   const char *refname)
{
	const char *slash = strchr(refname, '/');

	while (slash) {
		/* Length of the leading path up to and including this '/': */
		size_t dirnamelen = slash - refname + 1;
		struct ref_dir *subdir =
			search_for_subdir(dir, refname, dirnamelen);

		if (!subdir)
			return NULL;
		dir = subdir;
		slash = strchr(slash + 1, '/');
	}

	return dir;
}
187
188 struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
189 {
190 int entry_index;
191 struct ref_entry *entry;
192 dir = find_containing_dir(dir, refname);
193 if (!dir)
194 return NULL;
195 entry_index = search_ref_dir(dir, refname, strlen(refname));
196 if (entry_index == -1)
197 return NULL;
198 entry = dir->entries[entry_index];
199 return (entry->flag & REF_DIR) ? NULL : entry;
200 }
201
202 /*
203 * Emit a warning and return true iff ref1 and ref2 have the same name
204 * and the same oid. Die if they have the same name but different
205 * oids.
206 */
207 static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
208 {
209 if (strcmp(ref1->name, ref2->name))
210 return 0;
211
212 /* Duplicate name; make sure that they don't conflict: */
213
214 if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
215 /* This is impossible by construction */
216 die("Reference directory conflict: %s", ref1->name);
217
218 if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid))
219 die("Duplicated ref, and SHA1s don't match: %s", ref1->name);
220
221 warning("Duplicated ref: %s", ref1->name);
222 return 1;
223 }
224
225 /*
226 * Sort the entries in dir non-recursively (if they are not already
227 * sorted) and remove any duplicate entries.
228 */
229 static void sort_ref_dir(struct ref_dir *dir)
230 {
231 int i, j;
232 struct ref_entry *last = NULL;
233
234 /*
235 * This check also prevents passing a zero-length array to qsort(),
236 * which is a problem on some platforms.
237 */
238 if (dir->sorted == dir->nr)
239 return;
240
241 QSORT(dir->entries, dir->nr, ref_entry_cmp);
242
243 /* Remove any duplicates: */
244 for (i = 0, j = 0; j < dir->nr; j++) {
245 struct ref_entry *entry = dir->entries[j];
246 if (last && is_dup_ref(last, entry))
247 free_ref_entry(entry);
248 else
249 last = dir->entries[i++] = entry;
250 }
251 dir->sorted = dir->nr = i;
252 }
253
/*
 * Relationship between a directory and a ref prefix; used to decide
 * whether to descend into a directory and whether further prefix
 * checks are needed once inside it.
 */
enum prefix_state {
	/* All refs within the directory would match prefix: */
	PREFIX_CONTAINS_DIR,

	/* Some, but not all, refs within the directory might match prefix: */
	PREFIX_WITHIN_DIR,

	/* No refs within the directory could possibly match prefix: */
	PREFIX_EXCLUDES_DIR
};
264
265 /*
266 * Return a `prefix_state` constant describing the relationship
267 * between the directory with the specified `dirname` and `prefix`.
268 */
269 static enum prefix_state overlaps_prefix(const char *dirname,
270 const char *prefix)
271 {
272 while (*prefix && *dirname == *prefix) {
273 dirname++;
274 prefix++;
275 }
276 if (!*prefix)
277 return PREFIX_CONTAINS_DIR;
278 else if (!*dirname)
279 return PREFIX_WITHIN_DIR;
280 else
281 return PREFIX_EXCLUDES_DIR;
282 }
283
284 /*
285 * Load all of the refs from `dir` (recursively) that could possibly
286 * contain references matching `prefix` into our in-memory cache. If
287 * `prefix` is NULL, prime unconditionally.
288 */
289 static void prime_ref_dir(struct ref_dir *dir, const char *prefix)
290 {
291 /*
292 * The hard work of loading loose refs is done by get_ref_dir(), so we
293 * just need to recurse through all of the sub-directories. We do not
294 * even need to care about sorting, as traversal order does not matter
295 * to us.
296 */
297 int i;
298 for (i = 0; i < dir->nr; i++) {
299 struct ref_entry *entry = dir->entries[i];
300 if (!(entry->flag & REF_DIR)) {
301 /* Not a directory; no need to recurse. */
302 } else if (!prefix) {
303 /* Recurse in any case: */
304 prime_ref_dir(get_ref_dir(entry), NULL);
305 } else {
306 switch (overlaps_prefix(entry->name, prefix)) {
307 case PREFIX_CONTAINS_DIR:
308 /*
309 * Recurse, and from here down we
310 * don't have to check the prefix
311 * anymore:
312 */
313 prime_ref_dir(get_ref_dir(entry), NULL);
314 break;
315 case PREFIX_WITHIN_DIR:
316 prime_ref_dir(get_ref_dir(entry), prefix);
317 break;
318 case PREFIX_EXCLUDES_DIR:
319 /* No need to prime this directory. */
320 break;
321 }
322 }
323 }
324 }
325
/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/*
	 * How this directory relates to the iteration prefix; when it
	 * is PREFIX_WITHIN_DIR, each entry must be checked against the
	 * prefix individually.
	 */
	enum prefix_state prefix_state;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};
347
/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack. This is always
	 * at least 1, because when it becomes zero the iteration is
	 * ended and this struct is freed.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * Only include references with this prefix in the iteration.
	 * The prefix is matched textually, without regard for path
	 * component boundaries.
	 */
	const char *prefix;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessarily the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;

	/* The repository this iteration is associated with. */
	struct repository *repo;
};
384
/*
 * ref_iterator advance callback: walk the level stack depth-first
 * until positioned on the next non-directory entry (ITER_OK), or
 * abort the iteration once the stack empties.
 */
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;
		enum prefix_state entry_prefix_state;

		/* First visit to this level: sort before indexing into it. */
		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ref_iterator_abort(ref_iterator);

			continue;
		}

		entry = dir->entries[level->index];

		if (level->prefix_state == PREFIX_WITHIN_DIR) {
			/* Only part of this dir matches; check each entry. */
			entry_prefix_state = overlaps_prefix(entry->name, iter->prefix);
			if (entry_prefix_state == PREFIX_EXCLUDES_DIR)
				continue;
		} else {
			/* Whole dir is in (or out of) prefix; inherit state. */
			entry_prefix_state = level->prefix_state;
		}

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->prefix_state = entry_prefix_state;
			level->index = -1;
		} else {
			/* A reference: expose it through the base iterator. */
			iter->base.refname = entry->name;
			iter->base.oid = &entry->u.value.oid;
			iter->base.flags = entry->flag;
			return ITER_OK;
		}
	}
}
435
436 static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
437 struct object_id *peeled)
438 {
439 struct cache_ref_iterator *iter =
440 (struct cache_ref_iterator *)ref_iterator;
441
442 if (iter->repo != the_repository)
443 BUG("peeling for non-the_repository is not supported");
444 return peel_object(ref_iterator->oid, peeled) ? -1 : 0;
445 }
446
447 static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
448 {
449 struct cache_ref_iterator *iter =
450 (struct cache_ref_iterator *)ref_iterator;
451
452 free((char *)iter->prefix);
453 free(iter->levels);
454 base_ref_iterator_free(ref_iterator);
455 return ITER_DONE;
456 }
457
/* Virtual method table for cache_ref_iterator. */
static struct ref_iterator_vtable cache_ref_iterator_vtable = {
	.advance = cache_ref_iterator_advance,
	.peel = cache_ref_iterator_peel,
	.abort = cache_ref_iterator_abort
};
463
464 struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
465 const char *prefix,
466 struct repository *repo,
467 int prime_dir)
468 {
469 struct ref_dir *dir;
470 struct cache_ref_iterator *iter;
471 struct ref_iterator *ref_iterator;
472 struct cache_ref_iterator_level *level;
473
474 dir = get_ref_dir(cache->root);
475 if (prefix && *prefix)
476 dir = find_containing_dir(dir, prefix);
477 if (!dir)
478 /* There's nothing to iterate over. */
479 return empty_ref_iterator_begin();
480
481 if (prime_dir)
482 prime_ref_dir(dir, prefix);
483
484 CALLOC_ARRAY(iter, 1);
485 ref_iterator = &iter->base;
486 base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
487 ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
488
489 iter->levels_nr = 1;
490 level = &iter->levels[0];
491 level->index = -1;
492 level->dir = dir;
493
494 if (prefix && *prefix) {
495 iter->prefix = xstrdup(prefix);
496 level->prefix_state = PREFIX_WITHIN_DIR;
497 } else {
498 level->prefix_state = PREFIX_CONTAINS_DIR;
499 }
500
501 iter->repo = repo;
502
503 return ref_iterator;
504 }