]>
Commit | Line | Data |
---|---|---|
3bc581b9 MH |
1 | /* |
2 | * Generic reference iterator infrastructure. See refs-internal.h for | |
3 | * documentation about the design and use of reference iterators. | |
4 | */ | |
5 | ||
4f6728d5 | 6 | #include "git-compat-util.h" |
3bc581b9 MH |
7 | #include "refs.h" |
8 | #include "refs/refs-internal.h" | |
9 | #include "iterator.h" | |
10 | ||
/*
 * Advance the iterator to its next entry by dispatching through the
 * concrete implementation's vtable. Returns whatever the
 * implementation's advance method returns (ITER_OK, ITER_DONE, or
 * ITER_ERROR; see refs/refs-internal.h).
 */
int ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable->advance(ref_iterator);
}
15 | ||
/*
 * Peel the reference the iterator is currently positioned at, passing
 * `peeled` through to the concrete implementation's peel method.
 */
int ref_iterator_peel(struct ref_iterator *ref_iterator,
		      struct object_id *peeled)
{
	return ref_iterator->vtable->peel(ref_iterator, peeled);
}
21 | ||
/*
 * End the iteration early, dispatching to the concrete
 * implementation's abort method (which, in the implementations in
 * this file, also frees the iterator).
 */
int ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable->abort(ref_iterator);
}
26 | ||
27 | void base_ref_iterator_init(struct ref_iterator *iter, | |
5e01d838 | 28 | struct ref_iterator_vtable *vtable) |
3bc581b9 MH |
29 | { |
30 | iter->vtable = vtable; | |
31 | iter->refname = NULL; | |
32 | iter->oid = NULL; | |
33 | iter->flags = 0; | |
34 | } | |
35 | ||
/*
 * Release a ref_iterator allocated by one of the *_begin() functions.
 * Called by the implementations' abort methods once their own state
 * has been torn down.
 */
void base_ref_iterator_free(struct ref_iterator *iter)
{
	/* Help make use-after-free bugs fail quickly: */
	iter->vtable = NULL;
	free(iter);
}
42 | ||
/*
 * An iterator over the empty sequence of references; it carries no
 * state beyond the base iterator.
 */
struct empty_ref_iterator {
	struct ref_iterator base;
};
46 | ||
/* There are no entries to yield, so advancing just ends the iteration. */
static int empty_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	return ref_iterator_abort(ref_iterator);
}
51 | ||
/*
 * An empty iterator never yields an entry, so there is never a
 * current reference to peel; any call here is a caller bug.
 */
static int empty_ref_iterator_peel(struct ref_iterator *ref_iterator UNUSED,
				   struct object_id *peeled UNUSED)
{
	BUG("peel called for empty iterator");
}
57 | ||
/* Aborting the empty iterator only requires freeing the iterator itself. */
static int empty_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}
63 | ||
/*
 * Method table for empty_ref_iterator; also used by
 * is_empty_ref_iterator() to recognize such iterators by identity.
 */
static struct ref_iterator_vtable empty_ref_iterator_vtable = {
	.advance = empty_ref_iterator_advance,
	.peel = empty_ref_iterator_peel,
	.abort = empty_ref_iterator_abort,
};
69 | ||
70 | struct ref_iterator *empty_ref_iterator_begin(void) | |
71 | { | |
72 | struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter)); | |
73 | struct ref_iterator *ref_iterator = &iter->base; | |
74 | ||
5e01d838 | 75 | base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable); |
3bc581b9 MH |
76 | return ref_iterator; |
77 | } | |
78 | ||
/*
 * Report whether ref_iterator was created by empty_ref_iterator_begin(),
 * identified by comparing vtable pointers.
 */
int is_empty_ref_iterator(struct ref_iterator *ref_iterator)
{
	return ref_iterator->vtable == &empty_ref_iterator_vtable;
}
83 | ||
/*
 * An iterator that merges the entries of two sub-iterators, letting a
 * user-supplied callback decide at each step which entry to yield,
 * skip, or stop on.
 */
struct merge_ref_iterator {
	struct ref_iterator base;

	/* The two sub-iterators; set to NULL once exhausted or errored. */
	struct ref_iterator *iter0, *iter1;

	/* Callback that chooses which sub-iterator to consume next. */
	ref_iterator_select_fn *select;
	/* Opaque data passed through to select(). */
	void *cb_data;

	/*
	 * A pointer to iter0 or iter1 (whichever is supplying the
	 * current value), or NULL if advance has not yet been called.
	 */
	struct ref_iterator **current;
};
98 | ||
6f227800 PS |
99 | enum iterator_selection ref_iterator_select(struct ref_iterator *iter_worktree, |
100 | struct ref_iterator *iter_common, | |
101 | void *cb_data UNUSED) | |
102 | { | |
103 | if (iter_worktree && !iter_common) { | |
104 | /* | |
105 | * Return the worktree ref if there are no more common refs. | |
106 | */ | |
107 | return ITER_SELECT_0; | |
108 | } else if (iter_common) { | |
109 | /* | |
110 | * In case we have pending worktree and common refs we need to | |
111 | * yield them based on their lexicographical order. Worktree | |
112 | * refs that have the same name as common refs shadow the | |
113 | * latter. | |
114 | */ | |
115 | if (iter_worktree) { | |
116 | int cmp = strcmp(iter_worktree->refname, | |
117 | iter_common->refname); | |
118 | if (cmp < 0) | |
119 | return ITER_SELECT_0; | |
120 | else if (!cmp) | |
121 | return ITER_SELECT_0_SKIP_1; | |
122 | } | |
123 | ||
124 | /* | |
125 | * We now know that the lexicographically-next ref is a common | |
126 | * ref. When the common ref is a shared one we return it. | |
127 | */ | |
128 | if (parse_worktree_ref(iter_common->refname, NULL, NULL, | |
129 | NULL) == REF_WORKTREE_SHARED) | |
130 | return ITER_SELECT_1; | |
131 | ||
132 | /* | |
133 | * Otherwise, if the common ref is a per-worktree ref we skip | |
134 | * it because it would belong to the main worktree, not ours. | |
135 | */ | |
136 | return ITER_SKIP_1; | |
137 | } else { | |
138 | return ITER_DONE; | |
139 | } | |
140 | } | |
141 | ||
/*
 * Advance a merge_ref_iterator: step the underlying iterator(s) as
 * directed by the select callback until an entry can be yielded,
 * iteration finishes, or an error occurs. Exhausted or errored
 * sub-iterators are NULLed out so select() can see their state.
 */
static int merge_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct merge_ref_iterator *iter =
		(struct merge_ref_iterator *)ref_iterator;
	int ok;

	if (!iter->current) {
		/* Initialize: advance both iterators to their first entries */
		if ((ok = ref_iterator_advance(iter->iter0)) != ITER_OK) {
			iter->iter0 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
		if ((ok = ref_iterator_advance(iter->iter1)) != ITER_OK) {
			iter->iter1 = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	} else {
		/*
		 * Advance the current iterator past the just-used
		 * entry:
		 */
		if ((ok = ref_iterator_advance(*iter->current)) != ITER_OK) {
			*iter->current = NULL;
			if (ok == ITER_ERROR)
				goto error;
		}
	}

	/* Loop until we find an entry that we can yield. */
	while (1) {
		struct ref_iterator **secondary;
		enum iterator_selection selection =
			iter->select(iter->iter0, iter->iter1, iter->cb_data);

		if (selection == ITER_SELECT_DONE) {
			return ref_iterator_abort(ref_iterator);
		} else if (selection == ITER_SELECT_ERROR) {
			ref_iterator_abort(ref_iterator);
			return ITER_ERROR;
		}

		/*
		 * ITER_CURRENT_SELECTION_MASK picks which sub-iterator
		 * supplies the current entry:
		 */
		if ((selection & ITER_CURRENT_SELECTION_MASK) == 0) {
			iter->current = &iter->iter0;
			secondary = &iter->iter1;
		} else {
			iter->current = &iter->iter1;
			secondary = &iter->iter0;
		}

		/* The non-selected iterator may be told to skip its entry: */
		if (selection & ITER_SKIP_SECONDARY) {
			if ((ok = ref_iterator_advance(*secondary)) != ITER_OK) {
				*secondary = NULL;
				if (ok == ITER_ERROR)
					goto error;
			}
		}

		if (selection & ITER_YIELD_CURRENT) {
			/* Expose the chosen sub-iterator's entry as our own. */
			iter->base.refname = (*iter->current)->refname;
			iter->base.oid = (*iter->current)->oid;
			iter->base.flags = (*iter->current)->flags;
			return ITER_OK;
		}
	}

error:
	ref_iterator_abort(ref_iterator);
	return ITER_ERROR;
}
213 | ||
214 | static int merge_ref_iterator_peel(struct ref_iterator *ref_iterator, | |
215 | struct object_id *peeled) | |
216 | { | |
217 | struct merge_ref_iterator *iter = | |
218 | (struct merge_ref_iterator *)ref_iterator; | |
219 | ||
220 | if (!iter->current) { | |
033abf97 | 221 | BUG("peel called before advance for merge iterator"); |
3bc581b9 MH |
222 | } |
223 | return ref_iterator_peel(*iter->current, peeled); | |
224 | } | |
225 | ||
226 | static int merge_ref_iterator_abort(struct ref_iterator *ref_iterator) | |
227 | { | |
228 | struct merge_ref_iterator *iter = | |
229 | (struct merge_ref_iterator *)ref_iterator; | |
230 | int ok = ITER_DONE; | |
231 | ||
232 | if (iter->iter0) { | |
233 | if (ref_iterator_abort(iter->iter0) != ITER_DONE) | |
234 | ok = ITER_ERROR; | |
235 | } | |
236 | if (iter->iter1) { | |
237 | if (ref_iterator_abort(iter->iter1) != ITER_DONE) | |
238 | ok = ITER_ERROR; | |
239 | } | |
240 | base_ref_iterator_free(ref_iterator); | |
241 | return ok; | |
242 | } | |
243 | ||
/* Method table for merge_ref_iterator (see merge_ref_iterator_begin()). */
static struct ref_iterator_vtable merge_ref_iterator_vtable = {
	.advance = merge_ref_iterator_advance,
	.peel = merge_ref_iterator_peel,
	.abort = merge_ref_iterator_abort,
};
249 | ||
250 | struct ref_iterator *merge_ref_iterator_begin( | |
251 | struct ref_iterator *iter0, struct ref_iterator *iter1, | |
252 | ref_iterator_select_fn *select, void *cb_data) | |
253 | { | |
254 | struct merge_ref_iterator *iter = xcalloc(1, sizeof(*iter)); | |
255 | struct ref_iterator *ref_iterator = &iter->base; | |
256 | ||
257 | /* | |
258 | * We can't do the same kind of is_empty_ref_iterator()-style | |
259 | * optimization here as overlay_ref_iterator_begin() does, | |
260 | * because we don't know the semantics of the select function. | |
261 | * It might, for example, implement "intersect" by passing | |
262 | * references through only if they exist in both iterators. | |
263 | */ | |
264 | ||
5e01d838 | 265 | base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable); |
3bc581b9 MH |
266 | iter->iter0 = iter0; |
267 | iter->iter1 = iter1; | |
268 | iter->select = select; | |
269 | iter->cb_data = cb_data; | |
270 | iter->current = NULL; | |
271 | return ref_iterator; | |
272 | } | |
273 | ||
274 | /* | |
275 | * A ref_iterator_select_fn that overlays the items from front on top | |
276 | * of those from back (like loose refs over packed refs). See | |
277 | * overlay_ref_iterator_begin(). | |
278 | */ | |
279 | static enum iterator_selection overlay_iterator_select( | |
280 | struct ref_iterator *front, struct ref_iterator *back, | |
5cf88fd8 | 281 | void *cb_data UNUSED) |
3bc581b9 MH |
282 | { |
283 | int cmp; | |
284 | ||
285 | if (!back) | |
286 | return front ? ITER_SELECT_0 : ITER_SELECT_DONE; | |
287 | else if (!front) | |
288 | return ITER_SELECT_1; | |
289 | ||
290 | cmp = strcmp(front->refname, back->refname); | |
291 | ||
292 | if (cmp < 0) | |
293 | return ITER_SELECT_0; | |
294 | else if (cmp > 0) | |
295 | return ITER_SELECT_1; | |
296 | else | |
297 | return ITER_SELECT_0_SKIP_1; | |
298 | } | |
299 | ||
300 | struct ref_iterator *overlay_ref_iterator_begin( | |
301 | struct ref_iterator *front, struct ref_iterator *back) | |
302 | { | |
303 | /* | |
304 | * Optimization: if one of the iterators is empty, return the | |
305 | * other one rather than incurring the overhead of wrapping | |
306 | * them. | |
307 | */ | |
308 | if (is_empty_ref_iterator(front)) { | |
309 | ref_iterator_abort(front); | |
310 | return back; | |
311 | } else if (is_empty_ref_iterator(back)) { | |
312 | ref_iterator_abort(back); | |
313 | return front; | |
314 | } | |
315 | ||
5e01d838 | 316 | return merge_ref_iterator_begin(front, back, overlay_iterator_select, NULL); |
3bc581b9 MH |
317 | } |
318 | ||
/*
 * An iterator that yields only the entries of iter0 whose refnames
 * start with prefix, optionally trimming leading characters off each
 * refname before exposing it.
 */
struct prefix_ref_iterator {
	struct ref_iterator base;

	struct ref_iterator *iter0;	/* the wrapped source iterator */
	char *prefix;			/* owned; freed on abort */
	int trim;			/* number of leading chars to drop */
};
326 | ||
157113c6 JK |
/*
 * Return -1, 0, or +1 according to whether refname sorts before,
 * inside, or after the given prefix (bytes compared as unsigned
 * char, as strncmp() does).
 */
static int compare_prefix(const char *refname, const char *prefix)
{
	int cmp = strncmp(refname, prefix, strlen(prefix));

	/* Normalize strncmp()'s sign to exactly -1 / 0 / +1. */
	return (cmp > 0) - (cmp < 0);
}
340 | ||
/*
 * Advance a prefix_ref_iterator: pull entries from the wrapped
 * iterator, discarding those that sort before the prefix, and stop
 * the moment one sorts after it (the source iterator is ordered, so
 * no later entry can match).
 */
static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct prefix_ref_iterator *iter =
		(struct prefix_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		int cmp = compare_prefix(iter->iter0->refname, iter->prefix);

		if (cmp < 0)
			continue;

		if (cmp > 0) {
			/*
			 * As the source iterator is ordered, we
			 * can stop the iteration as soon as we see a
			 * refname that comes after the prefix:
			 */
			ok = ref_iterator_abort(iter->iter0);
			break;
		}

		if (iter->trim) {
			/*
			 * It is nonsense to trim off characters that
			 * you haven't already checked for via a
			 * prefix check, whether via this
			 * `prefix_ref_iterator` or upstream in
			 * `iter0`. So if there wouldn't be at least
			 * one character left in the refname after
			 * trimming, report it as a bug:
			 */
			if (strlen(iter->iter0->refname) <= iter->trim)
				BUG("attempt to trim too many characters");
			iter->base.refname = iter->iter0->refname + iter->trim;
		} else {
			iter->base.refname = iter->iter0->refname;
		}

		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/*
	 * The sub-iterator is finished (or was aborted above); clear it
	 * so our own abort method doesn't touch it again.
	 */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		return ITER_ERROR;
	return ok;
}
390 | ||
391 | static int prefix_ref_iterator_peel(struct ref_iterator *ref_iterator, | |
392 | struct object_id *peeled) | |
393 | { | |
394 | struct prefix_ref_iterator *iter = | |
395 | (struct prefix_ref_iterator *)ref_iterator; | |
396 | ||
397 | return ref_iterator_peel(iter->iter0, peeled); | |
398 | } | |
399 | ||
400 | static int prefix_ref_iterator_abort(struct ref_iterator *ref_iterator) | |
401 | { | |
402 | struct prefix_ref_iterator *iter = | |
403 | (struct prefix_ref_iterator *)ref_iterator; | |
404 | int ok = ITER_DONE; | |
405 | ||
406 | if (iter->iter0) | |
407 | ok = ref_iterator_abort(iter->iter0); | |
408 | free(iter->prefix); | |
409 | base_ref_iterator_free(ref_iterator); | |
410 | return ok; | |
411 | } | |
412 | ||
/* Method table for prefix_ref_iterator (see prefix_ref_iterator_begin()). */
static struct ref_iterator_vtable prefix_ref_iterator_vtable = {
	.advance = prefix_ref_iterator_advance,
	.peel = prefix_ref_iterator_peel,
	.abort = prefix_ref_iterator_abort,
};
418 | ||
419 | struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0, | |
420 | const char *prefix, | |
421 | int trim) | |
422 | { | |
423 | struct prefix_ref_iterator *iter; | |
424 | struct ref_iterator *ref_iterator; | |
425 | ||
426 | if (!*prefix && !trim) | |
427 | return iter0; /* optimization: no need to wrap iterator */ | |
428 | ||
ca56dadb | 429 | CALLOC_ARRAY(iter, 1); |
3bc581b9 MH |
430 | ref_iterator = &iter->base; |
431 | ||
5e01d838 | 432 | base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable); |
3bc581b9 MH |
433 | |
434 | iter->iter0 = iter0; | |
435 | iter->prefix = xstrdup(prefix); | |
436 | iter->trim = trim; | |
437 | ||
438 | return ref_iterator; | |
439 | } | |
4c4de895 MH |
440 | |
/*
 * The iterator (if any) currently being consumed by
 * do_for_each_repo_ref_iterator(); saved and restored around nested
 * iterations. NOTE(review): presumably consulted elsewhere (e.g. by
 * ref callbacks) -- confirm against its users in refs.c.
 */
struct ref_iterator *current_ref_iter = NULL;
442 | ||
4a6067cd SB |
443 | int do_for_each_repo_ref_iterator(struct repository *r, struct ref_iterator *iter, |
444 | each_repo_ref_fn fn, void *cb_data) | |
4c4de895 MH |
445 | { |
446 | int retval = 0, ok; | |
447 | struct ref_iterator *old_ref_iter = current_ref_iter; | |
448 | ||
449 | current_ref_iter = iter; | |
450 | while ((ok = ref_iterator_advance(iter)) == ITER_OK) { | |
4a6067cd | 451 | retval = fn(r, iter->refname, iter->oid, iter->flags, cb_data); |
4c4de895 MH |
452 | if (retval) { |
453 | /* | |
454 | * If ref_iterator_abort() returns ITER_ERROR, | |
455 | * we ignore that error in deference to the | |
456 | * callback function's return value. | |
457 | */ | |
458 | ref_iterator_abort(iter); | |
459 | goto out; | |
460 | } | |
461 | } | |
462 | ||
463 | out: | |
464 | current_ref_iter = old_ref_iter; | |
465 | if (ok == ITER_ERROR) | |
466 | return -1; | |
467 | return retval; | |
468 | } |