]>
Commit | Line | Data |
---|---|---|
a3437b8c JS |
1 | /* |
2 | * Copyright (c) 2005, Jon Seymour | |
3 | * | |
4 | * For more information about epoch theory on which this module is based, | |
5 | * refer to http://blackcubes.dyndns.org/epoch/. That web page defines | |
6 | * terms such as "epoch" and "minimal, non-linear epoch" and provides rationales | |
7 | * for some of the algorithms used here. | |
8 | * | |
9 | */ | |
10 | #include <stdlib.h> | |
17ebe977 PB |
11 | |
12 | /* Provides arbitrary precision integers required to accurately represent | |
13 | * fractional mass: */ | |
14 | #include <openssl/bn.h> | |
a3437b8c JS |
15 | |
16 | #include "cache.h" | |
17 | #include "commit.h" | |
ae563542 | 18 | #include "revision.h" |
a3437b8c JS |
19 | #include "epoch.h" |
20 | ||
/*
 * An exact rational number, used to represent commit "mass" without
 * rounding error.  Both components are OpenSSL arbitrary-precision
 * integers and must be initialized/cleared with the BN_* API.
 */
struct fraction {
	BIGNUM numerator;
	BIGNUM denominator;
};
25 | ||
/* True when commit n has exactly one parent, i.e. lies on a linear epoch. */
#define HAS_EXACTLY_ONE_PARENT(n) ((n)->parents && !(n)->parents->next)
27 | ||
/* Lazily created singletons: shared BN scratch context and the constant
 * fractions 1/1 and 0/1 (see get_BN_CTX(), get_one(), get_zero()). */
static BN_CTX *context = NULL;
static struct fraction *one = NULL;
static struct fraction *zero = NULL;
31 | ||
6da4016a | 32 | static BN_CTX *get_BN_CTX(void) |
a3437b8c JS |
33 | { |
34 | if (!context) { | |
35 | context = BN_CTX_new(); | |
36 | } | |
37 | return context; | |
38 | } | |
39 | ||
6da4016a | 40 | static struct fraction *new_zero(void) |
a3437b8c JS |
41 | { |
42 | struct fraction *result = xmalloc(sizeof(*result)); | |
43 | BN_init(&result->numerator); | |
44 | BN_init(&result->denominator); | |
45 | BN_zero(&result->numerator); | |
46 | BN_one(&result->denominator); | |
47 | return result; | |
48 | } | |
49 | ||
/*
 * Releases the BIGNUM storage held by a fraction.  The struct itself is
 * not freed (it may live on the stack or inside a mass_counter).
 */
static void clear_fraction(struct fraction *fraction)
{
	BN_clear(&fraction->numerator);
	BN_clear(&fraction->denominator);
}
55 | ||
56 | static struct fraction *divide(struct fraction *result, struct fraction *fraction, int divisor) | |
57 | { | |
58 | BIGNUM bn_divisor; | |
59 | ||
60 | BN_init(&bn_divisor); | |
61 | BN_set_word(&bn_divisor, divisor); | |
62 | ||
63 | BN_copy(&result->numerator, &fraction->numerator); | |
64 | BN_mul(&result->denominator, &fraction->denominator, &bn_divisor, get_BN_CTX()); | |
65 | ||
66 | BN_clear(&bn_divisor); | |
67 | return result; | |
68 | } | |
69 | ||
70 | static struct fraction *init_fraction(struct fraction *fraction) | |
71 | { | |
72 | BN_init(&fraction->numerator); | |
73 | BN_init(&fraction->denominator); | |
74 | BN_zero(&fraction->numerator); | |
75 | BN_one(&fraction->denominator); | |
76 | return fraction; | |
77 | } | |
78 | ||
6da4016a | 79 | static struct fraction *get_one(void) |
a3437b8c JS |
80 | { |
81 | if (!one) { | |
82 | one = new_zero(); | |
83 | BN_one(&one->numerator); | |
84 | } | |
85 | return one; | |
86 | } | |
87 | ||
6da4016a | 88 | static struct fraction *get_zero(void) |
a3437b8c JS |
89 | { |
90 | if (!zero) { | |
91 | zero = new_zero(); | |
92 | } | |
93 | return zero; | |
94 | } | |
95 | ||
96 | static struct fraction *copy(struct fraction *to, struct fraction *from) | |
97 | { | |
98 | BN_copy(&to->numerator, &from->numerator); | |
99 | BN_copy(&to->denominator, &from->denominator); | |
100 | return to; | |
101 | } | |
102 | ||
/*
 * result = left + right, reduced to lowest terms.
 *
 * The cross products are computed into temporaries before result is
 * written, so result may alias left or right (find_base_for_list()
 * relies on add(&x, &x, y)).
 */
static struct fraction *add(struct fraction *result, struct fraction *left, struct fraction *right)
{
	BIGNUM a, b, gcd;

	BN_init(&a);
	BN_init(&b);
	BN_init(&gcd);

	BN_mul(&a, &left->numerator, &right->denominator, get_BN_CTX());
	BN_mul(&b, &left->denominator, &right->numerator, get_BN_CTX());
	BN_mul(&result->denominator, &left->denominator, &right->denominator, get_BN_CTX());
	BN_add(&result->numerator, &a, &b);

	/* Divide out the gcd so repeated additions keep the operands small. */
	BN_gcd(&gcd, &result->denominator, &result->numerator, get_BN_CTX());
	BN_div(&result->denominator, NULL, &result->denominator, &gcd, get_BN_CTX());
	BN_div(&result->numerator, NULL, &result->numerator, &gcd, get_BN_CTX());

	BN_clear(&a);
	BN_clear(&b);
	BN_clear(&gcd);

	return result;
}
126 | ||
127 | static int compare(struct fraction *left, struct fraction *right) | |
128 | { | |
129 | BIGNUM a, b; | |
a3437b8c JS |
130 | int result; |
131 | ||
132 | BN_init(&a); | |
133 | BN_init(&b); | |
134 | ||
135 | BN_mul(&a, &left->numerator, &right->denominator, get_BN_CTX()); | |
136 | BN_mul(&b, &left->denominator, &right->numerator, get_BN_CTX()); | |
137 | ||
138 | result = BN_cmp(&a, &b); | |
139 | ||
140 | BN_clear(&a); | |
141 | BN_clear(&b); | |
142 | ||
143 | return result; | |
144 | } | |
145 | ||
/*
 * Per-commit accounting used by find_base_for_list(), attached to
 * commit->object.util: "seen" is the total mass that has flowed through
 * the commit so far, "pending" is mass waiting to be redistributed to
 * its parents.
 */
struct mass_counter {
	struct fraction seen;
	struct fraction pending;
};
150 | ||
151 | static struct mass_counter *new_mass_counter(struct commit *commit, struct fraction *pending) | |
152 | { | |
153 | struct mass_counter *mass_counter = xmalloc(sizeof(*mass_counter)); | |
154 | memset(mass_counter, 0, sizeof(*mass_counter)); | |
155 | ||
156 | init_fraction(&mass_counter->seen); | |
157 | init_fraction(&mass_counter->pending); | |
158 | ||
159 | copy(&mass_counter->pending, pending); | |
160 | copy(&mass_counter->seen, get_zero()); | |
161 | ||
162 | if (commit->object.util) { | |
17ebe977 PB |
163 | die("multiple attempts to initialize mass counter for %s", |
164 | sha1_to_hex(commit->object.sha1)); | |
a3437b8c JS |
165 | } |
166 | ||
167 | commit->object.util = mass_counter; | |
168 | ||
169 | return mass_counter; | |
170 | } | |
171 | ||
/*
 * Frees a mass counter, releasing the BIGNUM storage of both fractions
 * first.  The caller is responsible for clearing commit->object.util.
 */
static void free_mass_counter(struct mass_counter *counter)
{
	clear_fraction(&counter->seen);
	clear_fraction(&counter->pending);
	free(counter);
}
178 | ||
17ebe977 PB |
/*
 * Finds the base commit of a list of commits.
 *
 * One property of the commit being searched for is that every commit reachable
 * from the base commit is reachable from the commits in the starting list only
 * via paths that include the base commit.
 *
 * This algorithm uses a conservation of mass approach to find the base commit.
 *
 * We start by injecting one unit of mass into the graph at each
 * of the commits in the starting list. Injecting mass into a commit
 * is achieved by adding to its pending mass counter and, if it is not already
 * enqueued, enqueuing the commit in a list of pending commits, in latest
 * commit date first order.
 *
 * The algorithm then proceeds to visit each commit in the pending queue.
 * Upon each visit, the pending mass is added to the mass already seen for that
 * commit and then divided into N equal portions, where N is the number of
 * parents of the commit being visited. The divided portions are then injected
 * into each of the parents.
 *
 * The algorithm continues until we discover a commit which has seen all the
 * mass originally injected or until we run out of things to do.
 *
 * If we find a commit that has seen all the original mass, we have found
 * the common base of all the commits in the starting list.
 *
 * The algorithm does _not_ depend on accurate timestamps for correct operation.
 * However, reasonably sane (e.g. non-random) timestamps are required in order
 * to prevent an exponential performance characteristic. The occasional
 * timestamp inaccuracy will not dramatically affect performance but may
 * result in more nodes being processed than strictly necessary.
 *
 * This procedure sets *boundary to the address of the base commit. It returns
 * non-zero if, and only if, there was a problem parsing one of the
 * commits discovered during the traversal.
 */
static int find_base_for_list(struct commit_list *list, struct commit **boundary)
{
	int ret = 0;
	struct commit_list *cleaner = NULL;
	struct commit_list *pending = NULL;
	struct fraction injected;
	init_fraction(&injected);
	*boundary = NULL;

	/* Inject one unit of mass at each distinct starting commit. */
	for (; list; list = list->next) {
		struct commit *item = list->item;

		if (!item->object.util) {
			new_mass_counter(list->item, get_one());
			add(&injected, &injected, get_one());

			commit_list_insert(list->item, &cleaner);
			commit_list_insert(list->item, &pending);
		}
	}

	/* Redistribute mass until some commit has seen all of it. */
	while (!*boundary && pending && !ret) {
		struct commit *latest = pop_commit(&pending);
		struct mass_counter *latest_node = (struct mass_counter *) latest->object.util;
		int num_parents;

		if ((ret = parse_commit(latest)))
			continue;
		add(&latest_node->seen, &latest_node->seen, &latest_node->pending);

		num_parents = count_parents(latest);
		if (num_parents) {
			struct fraction distribution;
			struct commit_list *parents;

			/* Split the pending mass evenly among the parents. */
			divide(init_fraction(&distribution), &latest_node->pending, num_parents);

			for (parents = latest->parents; parents; parents = parents->next) {
				struct commit *parent = parents->item;
				struct mass_counter *parent_node = (struct mass_counter *) parent->object.util;

				if (!parent_node) {
					parent_node = new_mass_counter(parent, &distribution);
					insert_by_date(parent, &pending);
					commit_list_insert(parent, &cleaner);
				} else {
					/*
					 * Only re-enqueue a parent when its pending
					 * mass was zero; otherwise it is already
					 * queued.
					 */
					if (!compare(&parent_node->pending, get_zero()))
						insert_by_date(parent, &pending);
					add(&parent_node->pending, &parent_node->pending, &distribution);
				}
			}

			clear_fraction(&distribution);
		}

		/* All injected mass seen here means this is the base. */
		if (!compare(&latest_node->seen, &injected))
			*boundary = latest;
		copy(&latest_node->pending, get_zero());
	}

	/* Detach and free every mass counter we attached. */
	while (cleaner) {
		struct commit *next = pop_commit(&cleaner);
		free_mass_counter((struct mass_counter *) next->object.util);
		next->object.util = NULL;
	}

	if (pending)
		free_commit_list(pending);

	clear_fraction(&injected);
	return ret;
}
288 | ||
289 | ||
17ebe977 PB |
290 | /* |
291 | * Finds the base of an minimal, non-linear epoch, headed at head, by | |
292 | * applying the find_base_for_list to a list consisting of the parents | |
293 | */ | |
a3437b8c JS |
294 | static int find_base(struct commit *head, struct commit **boundary) |
295 | { | |
296 | int ret = 0; | |
297 | struct commit_list *pending = NULL; | |
298 | struct commit_list *next; | |
299 | ||
a3437b8c JS |
300 | for (next = head->parents; next; next = next->next) { |
301 | commit_list_insert(next->item, &pending); | |
302 | } | |
303 | ret = find_base_for_list(pending, boundary); | |
304 | free_commit_list(pending); | |
305 | ||
306 | return ret; | |
307 | } | |
308 | ||
17ebe977 PB |
309 | /* |
310 | * This procedure traverses to the boundary of the first epoch in the epoch | |
311 | * sequence of the epoch headed at head_of_epoch. This is either the end of | |
312 | * the maximal linear epoch or the base of a minimal non-linear epoch. | |
313 | * | |
314 | * The queue of pending nodes is sorted in reverse date order and each node | |
315 | * is currently in the queue at most once. | |
316 | */ | |
a3437b8c JS |
317 | static int find_next_epoch_boundary(struct commit *head_of_epoch, struct commit **boundary) |
318 | { | |
319 | int ret; | |
320 | struct commit *item = head_of_epoch; | |
321 | ||
322 | ret = parse_commit(item); | |
323 | if (ret) | |
324 | return ret; | |
325 | ||
326 | if (HAS_EXACTLY_ONE_PARENT(item)) { | |
17ebe977 PB |
327 | /* |
328 | * We are at the start of a maximimal linear epoch. | |
329 | * Traverse to the end. | |
330 | */ | |
a3437b8c JS |
331 | while (HAS_EXACTLY_ONE_PARENT(item) && !ret) { |
332 | item = item->parents->item; | |
333 | ret = parse_commit(item); | |
334 | } | |
335 | *boundary = item; | |
336 | ||
337 | } else { | |
17ebe977 PB |
338 | /* |
339 | * Otherwise, we are at the start of a minimal, non-linear | |
340 | * epoch - find the common base of all parents. | |
341 | */ | |
a3437b8c | 342 | ret = find_base(item, boundary); |
a3437b8c JS |
343 | } |
344 | ||
345 | return ret; | |
346 | } | |
347 | ||
17ebe977 PB |
348 | /* |
349 | * Returns non-zero if parent is known to be a parent of child. | |
350 | */ | |
a3437b8c JS |
351 | static int is_parent_of(struct commit *parent, struct commit *child) |
352 | { | |
353 | struct commit_list *parents; | |
354 | for (parents = child->parents; parents; parents = parents->next) { | |
17ebe977 PB |
355 | if (!memcmp(parent->object.sha1, parents->item->object.sha1, |
356 | sizeof(parents->item->object.sha1))) | |
a3437b8c JS |
357 | return 1; |
358 | } | |
359 | return 0; | |
360 | } | |
361 | ||
17ebe977 PB |
362 | /* |
363 | * Pushes an item onto the merge order stack. If the top of the stack is | |
364 | * marked as being a possible "break", we check to see whether it actually | |
365 | * is a break. | |
366 | */ | |
a3437b8c JS |
367 | static void push_onto_merge_order_stack(struct commit_list **stack, struct commit *item) |
368 | { | |
369 | struct commit_list *top = *stack; | |
370 | if (top && (top->item->object.flags & DISCONTINUITY)) { | |
371 | if (is_parent_of(top->item, item)) { | |
372 | top->item->object.flags &= ~DISCONTINUITY; | |
373 | } | |
374 | } | |
375 | commit_list_insert(item, stack); | |
376 | } | |
377 | ||
17ebe977 PB |
/*
 * Marks all interesting, visited commits reachable from this commit
 * as uninteresting. We stop recursing when we reach the epoch boundary,
 * an unvisited node or a node that has already been marked uninteresting.
 *
 * This doesn't actually mark all ancestors between the start node and the
 * epoch boundary uninteresting, but does ensure that they will eventually
 * be marked uninteresting when the main sort_first_epoch() traversal
 * eventually reaches them.
 */
static void mark_ancestors_uninteresting(struct commit *commit)
{
	/* Snapshot the flags BEFORE setting UNINTERESTING below, so the
	 * recursion test sees this node's state on entry. */
	unsigned int flags = commit->object.flags;
	int visited = flags & VISITED;
	int boundary = flags & BOUNDARY;
	int uninteresting = flags & UNINTERESTING;
	struct commit_list *next;

	commit->object.flags |= UNINTERESTING;

	/*
	 * We only need to recurse if
	 * we are not on the boundary and
	 * we have not already been marked uninteresting and
	 * we have already been visited.
	 *
	 * The main sort_first_epoch traverse will mark unreachable
	 * all uninteresting, unvisited parents as they are visited
	 * so there is no need to duplicate that traversal here.
	 *
	 * Similarly, if we are already marked uninteresting
	 * then either all ancestors have already been marked
	 * uninteresting or will be once the sort_first_epoch
	 * traverse reaches them.
	 */

	if (uninteresting || boundary || !visited)
		return;

	for (next = commit->parents; next; next = next->next)
		mark_ancestors_uninteresting(next->item);
}
420 | ||
17ebe977 PB |
/*
 * Sorts the nodes of the first epoch of the epoch sequence of the epoch
 * headed at head into merge order, pushing them onto *stack (deepest
 * commits end up deepest in the stack; emit_stack() pops them back out).
 */
static void sort_first_epoch(struct commit *head, struct commit_list **stack)
{
	struct commit_list *parents;

	head->object.flags |= VISITED;

	/*
	 * TODO: By sorting the parents in a different order, we can alter the
	 * merge order to show contemporaneous changes in parallel branches
	 * occurring after "local" changes. This is useful for a developer
	 * when a developer wants to see all changes that were incorporated
	 * into the same merge as her own changes occur after her own
	 * changes.
	 */

	for (parents = head->parents; parents; parents = parents->next) {
		struct commit *parent = parents->item;

		if (head->object.flags & UNINTERESTING) {
			/*
			 * Propagates the uninteresting bit to all parents.
			 * If we have already visited this parent, then
			 * the uninteresting bit will be propagated to each
			 * reachable commit that is still not marked
			 * uninteresting and won't otherwise be reached.
			 */
			mark_ancestors_uninteresting(parent);
		}

		if (!(parent->object.flags & VISITED)) {
			if (parent->object.flags & BOUNDARY) {
				if (*stack) {
					die("something else is on the stack - %s",
					    sha1_to_hex((*stack)->item->object.sha1));
				}
				push_onto_merge_order_stack(stack, parent);
				parent->object.flags |= VISITED;

			} else {
				sort_first_epoch(parent, stack);
				/* parents is non-NULL inside this loop, so
				 * every return from the recursion marks the
				 * stack top as a possible discontinuity. */
				if (parents) {
					/*
					 * This indicates a possible
					 * discontinuity; it may not be an
					 * actual discontinuity if the head
					 * of parent N happens to be the tail
					 * of parent N+1.
					 *
					 * The next push onto the stack will
					 * resolve the question.
					 */
					(*stack)->item->object.flags |= DISCONTINUITY;
				}
			}
		}
	}

	push_onto_merge_order_stack(stack, head);
}
484 | ||
17ebe977 PB |
485 | /* |
486 | * Emit the contents of the stack. | |
487 | * | |
488 | * The stack is freed and replaced by NULL. | |
489 | * | |
490 | * Sets the return value to STOP if no further output should be generated. | |
491 | */ | |
170774a3 | 492 | static int emit_stack(struct commit_list **stack, emitter_func emitter, int include_last) |
a3437b8c JS |
493 | { |
494 | unsigned int seen = 0; | |
495 | int action = CONTINUE; | |
496 | ||
497 | while (*stack && (action != STOP)) { | |
a3437b8c | 498 | struct commit *next = pop_commit(stack); |
a3437b8c | 499 | seen |= next->object.flags; |
170774a3 JS |
500 | if (*stack || include_last) { |
501 | if (!*stack) | |
502 | next->object.flags |= BOUNDARY; | |
84b18a8e | 503 | action = emitter(next); |
170774a3 | 504 | } |
a3437b8c JS |
505 | } |
506 | ||
507 | if (*stack) { | |
508 | free_commit_list(*stack); | |
509 | *stack = NULL; | |
510 | } | |
511 | ||
512 | return (action == STOP || (seen & UNINTERESTING)) ? STOP : CONTINUE; | |
513 | } | |
514 | ||
17ebe977 PB |
515 | /* |
516 | * Sorts an arbitrary epoch into merge order by sorting each epoch | |
517 | * of its epoch sequence into order. | |
518 | * | |
519 | * Note: this algorithm currently leaves traces of its execution in the | |
520 | * object flags of nodes it discovers. This should probably be fixed. | |
521 | */ | |
a3437b8c JS |
522 | static int sort_in_merge_order(struct commit *head_of_epoch, emitter_func emitter) |
523 | { | |
524 | struct commit *next = head_of_epoch; | |
525 | int ret = 0; | |
526 | int action = CONTINUE; | |
527 | ||
528 | ret = parse_commit(head_of_epoch); | |
529 | ||
8cd1033e JS |
530 | next->object.flags |= BOUNDARY; |
531 | ||
a3437b8c | 532 | while (next && next->parents && !ret && (action != STOP)) { |
a3437b8c JS |
533 | struct commit *base = NULL; |
534 | ||
17ebe977 PB |
535 | ret = find_next_epoch_boundary(next, &base); |
536 | if (ret) | |
a3437b8c | 537 | return ret; |
a3437b8c | 538 | next->object.flags |= BOUNDARY; |
17ebe977 | 539 | if (base) |
a3437b8c | 540 | base->object.flags |= BOUNDARY; |
a3437b8c JS |
541 | |
542 | if (HAS_EXACTLY_ONE_PARENT(next)) { | |
a3437b8c JS |
543 | while (HAS_EXACTLY_ONE_PARENT(next) |
544 | && (action != STOP) | |
545 | && !ret) { | |
a3437b8c JS |
546 | if (next->object.flags & UNINTERESTING) { |
547 | action = STOP; | |
548 | } else { | |
84b18a8e | 549 | action = emitter(next); |
a3437b8c | 550 | } |
a3437b8c JS |
551 | if (action != STOP) { |
552 | next = next->parents->item; | |
553 | ret = parse_commit(next); | |
554 | } | |
555 | } | |
556 | ||
557 | } else { | |
a3437b8c JS |
558 | struct commit_list *stack = NULL; |
559 | sort_first_epoch(next, &stack); | |
170774a3 | 560 | action = emit_stack(&stack, emitter, (base == NULL)); |
a3437b8c | 561 | next = base; |
a3437b8c | 562 | } |
a3437b8c JS |
563 | } |
564 | ||
565 | if (next && (action != STOP) && !ret) { | |
84b18a8e | 566 | emitter(next); |
a3437b8c JS |
567 | } |
568 | ||
569 | return ret; | |
570 | } | |
571 | ||
17ebe977 PB |
572 | /* |
573 | * Sorts the nodes reachable from a starting list in merge order, we | |
574 | * first find the base for the starting list and then sort all nodes | |
575 | * in this subgraph using the sort_first_epoch algorithm. Once we have | |
576 | * reached the base we can continue sorting using sort_in_merge_order. | |
577 | */ | |
a3437b8c JS |
578 | int sort_list_in_merge_order(struct commit_list *list, emitter_func emitter) |
579 | { | |
580 | struct commit_list *stack = NULL; | |
581 | struct commit *base; | |
a3437b8c JS |
582 | int ret = 0; |
583 | int action = CONTINUE; | |
a3437b8c JS |
584 | struct commit_list *reversed = NULL; |
585 | ||
bce62866 LT |
586 | for (; list; list = list->next) |
587 | commit_list_insert(list->item, &reversed); | |
a3437b8c | 588 | |
d6bd56a0 JS |
589 | if (!reversed) |
590 | return ret; | |
591 | else if (!reversed->next) { | |
17ebe977 PB |
592 | /* |
593 | * If there is only one element in the list, we can sort it | |
594 | * using sort_in_merge_order. | |
595 | */ | |
a3437b8c | 596 | base = reversed->item; |
a3437b8c | 597 | } else { |
17ebe977 PB |
598 | /* |
599 | * Otherwise, we search for the base of the list. | |
600 | */ | |
601 | ret = find_base_for_list(reversed, &base); | |
602 | if (ret) | |
a3437b8c | 603 | return ret; |
17ebe977 | 604 | if (base) |
a3437b8c | 605 | base->object.flags |= BOUNDARY; |
a3437b8c JS |
606 | |
607 | while (reversed) { | |
eff19d5e JS |
608 | struct commit * next = pop_commit(&reversed); |
609 | ||
ff9206e7 | 610 | if (!(next->object.flags & VISITED) && next!=base) { |
eff19d5e JS |
611 | sort_first_epoch(next, &stack); |
612 | if (reversed) { | |
613 | /* | |
614 | * If we have more commits | |
615 | * to push, then the first | |
616 | * push for the next parent may | |
617 | * (or may * not) represent a | |
618 | * discontinuity with respect | |
619 | * to the parent currently on | |
620 | * the top of the stack. | |
621 | * | |
622 | * Mark it for checking here, | |
623 | * and check it with the next | |
624 | * push. See sort_first_epoch() | |
625 | * for more details. | |
626 | */ | |
627 | stack->item->object.flags |= DISCONTINUITY; | |
628 | } | |
a3437b8c JS |
629 | } |
630 | } | |
631 | ||
170774a3 | 632 | action = emit_stack(&stack, emitter, (base==NULL)); |
a3437b8c JS |
633 | } |
634 | ||
635 | if (base && (action != STOP)) { | |
636 | ret = sort_in_merge_order(base, emitter); | |
637 | } | |
638 | ||
639 | return ret; | |
640 | } |