#include "../cache.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "../iterator.h"

/* FIXME: This declaration shouldn't be here */
void read_loose_refs(const char *dirname, struct ref_dir *dir);

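/*
 * Append entry to dir, growing dir->entries as needed. dir->sorted is
 * bumped only when the new entry keeps the array in sorted order, so
 * in-order insertions avoid a later re-sort.
 */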
void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
{
	ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
	dir->entries[dir->nr++] = entry;
	/* optimize for the case that entries are added in order */
	if (dir->nr == 1 ||
	    (dir->nr == dir->sorted + 1 &&
	     strcmp(dir->entries[dir->nr - 2]->name,
		    dir->entries[dir->nr - 1]->name) < 0))
		dir->sorted = dir->nr;
}

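/*
 * Return the ref_dir stored in entry, which must be a directory
 * entry. If the directory is marked REF_INCOMPLETE, first read the
 * loose references under it (adding refs/bisect/ by hand when filling
 * in "refs/", since it is per-worktree), then clear the flag.
 */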
struct ref_dir *get_ref_dir(struct ref_entry *entry)
{
	struct ref_dir *dir;
	assert(entry->flag & REF_DIR);
	dir = &entry->u.subdir;
	if (entry->flag & REF_INCOMPLETE) {
		read_loose_refs(entry->name, dir);

		/*
		 * Manually add refs/bisect, which, being
		 * per-worktree, might not appear in the directory
		 * listing for refs/ in the main repo.
		 */
		if (!strcmp(entry->name, "refs/")) {
			int pos = search_ref_dir(dir, "refs/bisect/", 12);
			if (pos < 0) {
				struct ref_entry *child_entry;
				child_entry = create_dir_entry(dir->ref_store,
							       "refs/bisect/",
							       12, 1);
				add_entry_to_dir(dir, child_entry);
			}
		}
		entry->flag &= ~REF_INCOMPLETE;
	}
	return dir;
}

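/*
 * Allocate a new non-directory ref_entry for refname pointing at
 * sha1, with its peeled value cleared. If check_name is set, die if
 * refname is not syntactically valid.
 */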
struct ref_entry *create_ref_entry(const char *refname,
				   const unsigned char *sha1, int flag,
				   int check_name)
{
	struct ref_entry *ref;

	if (check_name &&
	    check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
		die("Reference has invalid format: '%s'", refname);
	FLEX_ALLOC_STR(ref, name, refname);
	hashcpy(ref->u.value.oid.hash, sha1);
	oidclr(&ref->u.value.peeled);
	ref->flag = flag;
	return ref;
}

static void clear_ref_dir(struct ref_dir *dir);

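/*
 * Free entry, recursively freeing its contents if it is a directory
 * entry.
 */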
void free_ref_entry(struct ref_entry *entry)
{
	if (entry->flag & REF_DIR) {
		/*
		 * Do not use get_ref_dir() here, as that might
		 * trigger the reading of loose refs.
		 */
		clear_ref_dir(&entry->u.subdir);
	}
	free(entry);
}

/*
 * Clear and free all entries in dir, recursively.
 */
static void clear_ref_dir(struct ref_dir *dir)
{
	int i;
	for (i = 0; i < dir->nr; i++)
		free_ref_entry(dir->entries[i]);
	free(dir->entries);
	dir->sorted = dir->nr = dir->alloc = 0;
	dir->entries = NULL;
}

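/*
 * Allocate a directory entry named after the first len bytes of
 * dirname, belonging to ref_store. If incomplete is set, mark it
 * REF_INCOMPLETE so that its loose refs are read lazily by
 * get_ref_dir().
 */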
struct ref_entry *create_dir_entry(struct files_ref_store *ref_store,
				   const char *dirname, size_t len,
				   int incomplete)
{
	struct ref_entry *direntry;
	FLEX_ALLOC_MEM(direntry, name, dirname, len);
	direntry->u.subdir.ref_store = ref_store;
	direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
	return direntry;
}

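/*
 * qsort() comparison callback: order two ref_entry pointers by name.
 */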
static int ref_entry_cmp(const void *a, const void *b)
{
	struct ref_entry *one = *(struct ref_entry **)a;
	struct ref_entry *two = *(struct ref_entry **)b;
	return strcmp(one->name, two->name);
}

static void sort_ref_dir(struct ref_dir *dir);

struct string_slice {
	size_t len;
	const char *str;
};

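/*
 * bsearch() comparison callback: compare a string_slice key against a
 * ref_entry, treating the key as if it were NUL-terminated at
 * key->len.
 */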
static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
{
	const struct string_slice *key = key_;
	const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
	int cmp = strncmp(key->str, ent->name, key->len);
	if (cmp)
		return cmp;
	return '\0' - (unsigned char)ent->name[key->len];
}

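/*
 * Return the index of the entry whose name matches the first len
 * bytes of refname, or -1 if there is no such entry. dir is sorted as
 * a side effect. Only this directory level is searched; there is no
 * recursion into subdirectories.
 */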
int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
{
	struct ref_entry **r;
	struct string_slice key;

	if (refname == NULL || !dir->nr)
		return -1;

	sort_ref_dir(dir);
	key.len = len;
	key.str = refname;
	r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
		    ref_entry_cmp_sslice);

	if (r == NULL)
		return -1;

	return r - dir->entries;
}

/*
 * Search for a directory entry directly within dir (without
 * recursing). Sort dir if necessary. subdirname must be a directory
 * name (i.e., end in '/'). If mkdir is set, then create the
 * directory if it is missing; otherwise, return NULL if the desired
 * directory cannot be found. dir must already be complete.
 */
static struct ref_dir *search_for_subdir(struct ref_dir *dir,
					 const char *subdirname, size_t len,
					 int mkdir)
{
	int entry_index = search_ref_dir(dir, subdirname, len);
	struct ref_entry *entry;
	if (entry_index == -1) {
		if (!mkdir)
			return NULL;
		/*
		 * Since dir is complete, the absence of a subdir
		 * means that the subdir really doesn't exist;
		 * therefore, create an empty record for it but mark
		 * the record complete.
		 */
		entry = create_dir_entry(dir->ref_store, subdirname, len, 0);
		add_entry_to_dir(dir, entry);
	} else {
		entry = dir->entries[entry_index];
	}
	return get_ref_dir(entry);
}

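/*
 * Walk down from dir to the directory that would contain refname,
 * creating intermediate directories if mkdir is set. Return NULL if
 * the containing directory does not exist and mkdir is not set.
 */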
struct ref_dir *find_containing_dir(struct ref_dir *dir,
				    const char *refname, int mkdir)
{
	const char *slash;
	for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) {
		size_t dirnamelen = slash - refname + 1;
		struct ref_dir *subdir;
		subdir = search_for_subdir(dir, refname, dirnamelen, mkdir);
		if (!subdir) {
			dir = NULL;
			break;
		}
		dir = subdir;
	}

	return dir;
}

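/*
 * Look up refname within dir (recursing into subdirectories as
 * needed) and return its entry, or NULL if it is not found or if the
 * name refers to a directory rather than a reference.
 */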
struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
{
	int entry_index;
	struct ref_entry *entry;
	dir = find_containing_dir(dir, refname, 0);
	if (!dir)
		return NULL;
	entry_index = search_ref_dir(dir, refname, strlen(refname));
	if (entry_index == -1)
		return NULL;
	entry = dir->entries[entry_index];
	return (entry->flag & REF_DIR) ? NULL : entry;
}

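/*
 * Remove the entry for refname (or, if refname ends with '/', the
 * directory entry it names) from its containing directory and free
 * it. Return the number of entries remaining in that directory, or
 * -1 if no such entry was found.
 */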
int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
{
	int refname_len = strlen(refname);
	int entry_index;
	struct ref_entry *entry;
	int is_dir = refname[refname_len - 1] == '/';
	if (is_dir) {
		/*
		 * refname represents a reference directory. Remove
		 * the trailing slash; otherwise we will get the
		 * directory *representing* refname rather than the
		 * one *containing* it.
		 */
		char *dirname = xmemdupz(refname, refname_len - 1);
		dir = find_containing_dir(dir, dirname, 0);
		free(dirname);
	} else {
		dir = find_containing_dir(dir, refname, 0);
	}
	if (!dir)
		return -1;
	entry_index = search_ref_dir(dir, refname, refname_len);
	if (entry_index == -1)
		return -1;
	entry = dir->entries[entry_index];

	memmove(&dir->entries[entry_index],
		&dir->entries[entry_index + 1],
		(dir->nr - entry_index - 1) * sizeof(*dir->entries)
		);
	dir->nr--;
	if (dir->sorted > entry_index)
		dir->sorted--;
	free_ref_entry(entry);
	return dir->nr;
}

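/*
 * Insert ref into the directory hierarchy rooted at dir, creating
 * missing subdirectories along the way. Return 0 on success or -1 if
 * the containing directory could not be found or created.
 */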
int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref)
{
	dir = find_containing_dir(dir, ref->name, 1);
	if (!dir)
		return -1;
	add_entry_to_dir(dir, ref);
	return 0;
}

/*
 * Emit a warning and return true iff ref1 and ref2 have the same name
 * and the same sha1. Die if they have the same name but different
 * sha1s.
 */
static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
{
	if (strcmp(ref1->name, ref2->name))
		return 0;

	/* Duplicate name; make sure that they don't conflict: */

	if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
		/* This is impossible by construction */
		die("Reference directory conflict: %s", ref1->name);

	if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid))
		die("Duplicated ref, and SHA1s don't match: %s", ref1->name);

	warning("Duplicated ref: %s", ref1->name);
	return 1;
}

/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
static void sort_ref_dir(struct ref_dir *dir)
{
	int i, j;
	struct ref_entry *last = NULL;

	/*
	 * This check also prevents passing a zero-length array to qsort(),
	 * which is a problem on some platforms.
	 */
	if (dir->sorted == dir->nr)
		return;

	QSORT(dir->entries, dir->nr, ref_entry_cmp);

	/* Remove any duplicates: */
	for (i = 0, j = 0; j < dir->nr; j++) {
		struct ref_entry *entry = dir->entries[j];
		if (last && is_dup_ref(last, entry))
			free_ref_entry(entry);
		else
			last = dir->entries[i++] = entry;
	}
	dir->sorted = dir->nr = i;
}

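/*
 * Call fn for each non-directory entry in dir, recursing into
 * subdirectories, starting at position offset within the top-level
 * dir. Stop and return the first nonzero value returned by fn, or 0
 * if every entry was visited. dir must already be sorted.
 */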
int do_for_each_entry_in_dir(struct ref_dir *dir, int offset,
			     each_ref_entry_fn fn, void *cb_data)
{
	int i;
	assert(dir->sorted == dir->nr);
	for (i = offset; i < dir->nr; i++) {
		struct ref_entry *entry = dir->entries[i];
		int retval;
		if (entry->flag & REF_DIR) {
			struct ref_dir *subdir = get_ref_dir(entry);
			sort_ref_dir(subdir);
			retval = do_for_each_entry_in_dir(subdir, 0, fn, cb_data);
		} else {
			retval = fn(entry, cb_data);
		}
		if (retval)
			return retval;
	}
	return 0;
}

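/*
 * Load the contents of dir and all of its subdirectories into the
 * cache.
 */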
void prime_ref_dir(struct ref_dir *dir)
{
	/*
	 * The hard work of loading loose refs is done by get_ref_dir(), so we
	 * just need to recurse through all of the sub-directories. We do not
	 * even need to care about sorting, as traversal order does not matter
	 * to us.
	 */
	int i;
	for (i = 0; i < dir->nr; i++) {
		struct ref_entry *entry = dir->entries[i];
		if (entry->flag & REF_DIR)
			prime_ref_dir(get_ref_dir(entry));
	}
}

/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};

/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack. This is always
	 * at least 1, because when it becomes zero the iteration is
	 * ended and this struct is freed.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessarily the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;
};

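/*
 * Advance the iterator to the next non-directory entry, pushing and
 * popping levels as directories are entered and exhausted. Return
 * ITER_OK when an entry is found, or the result of aborting the
 * iteration once the outermost level is exhausted.
 */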
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;

		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ref_iterator_abort(ref_iterator);

			continue;
		}

		entry = dir->entries[level->index];

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->index = -1;
		} else {
			iter->base.refname = entry->name;
			iter->base.oid = &entry->u.value.oid;
			iter->base.flags = entry->flag;
			return ITER_OK;
		}
	}
}

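/*
 * Peel entry to a non-tag object and cache the result in
 * entry->u.value.peeled. If repeel is set, discard any cached peeled
 * value and peel again. Return a peel_status describing the outcome;
 * broken and symbolic refs are reported without attempting to peel.
 */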
enum peel_status peel_entry(struct ref_entry *entry, int repeel)
{
	enum peel_status status;

	if (entry->flag & REF_KNOWS_PEELED) {
		if (repeel) {
			entry->flag &= ~REF_KNOWS_PEELED;
			oidclr(&entry->u.value.peeled);
		} else {
			return is_null_oid(&entry->u.value.peeled) ?
				PEEL_NON_TAG : PEEL_PEELED;
		}
	}
	if (entry->flag & REF_ISBROKEN)
		return PEEL_BROKEN;
	if (entry->flag & REF_ISSYMREF)
		return PEEL_IS_SYMREF;

	status = peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);
	if (status == PEEL_PEELED || status == PEEL_NON_TAG)
		entry->flag |= REF_KNOWS_PEELED;
	return status;
}

static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
				   struct object_id *peeled)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;
	struct cache_ref_iterator_level *level;
	struct ref_entry *entry;

	level = &iter->levels[iter->levels_nr - 1];

	if (level->index == -1)
		die("BUG: peel called before advance for cache iterator");

	entry = level->dir->entries[level->index];

	if (peel_entry(entry, 0))
		return -1;
	oidcpy(peeled, &entry->u.value.peeled);
	return 0;
}

static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	free(iter->levels);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}

static struct ref_iterator_vtable cache_ref_iterator_vtable = {
	cache_ref_iterator_advance,
	cache_ref_iterator_peel,
	cache_ref_iterator_abort
};

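/*
 * Return a ref_iterator that iterates over the entries in dir,
 * including those in subdirectories. The first level of the
 * iterator's stack is seeded with dir; further levels are pushed as
 * directories are encountered during advance.
 */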
struct ref_iterator *cache_ref_iterator_begin(struct ref_dir *dir)
{
	struct cache_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	struct cache_ref_iterator_level *level;

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
	ALLOC_GROW(iter->levels, 10, iter->levels_alloc);

	iter->levels_nr = 1;
	level = &iter->levels[0];
	level->index = -1;
	level->dir = dir;

	return ref_iterator;
}