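/*
 * shallow.c - support for shallow repositories: reading and writing
 * the shallow file, computing shallow boundaries for a given depth or
 * rev-list, and matching a sender's shallow commits against refs
 * during fetch/push negotiation.
 */
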
#include "cache.h"
#include "repository.h"
#include "tempfile.h"
#include "lockfile.h"
#include "object-store.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "remote.h"
#include "refs.h"
#include "sha1-array.h"
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
#include "list-objects.h"

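/*
 * Select an alternate path for the shallow file that
 * is_repository_shallow() will read.  An empty string means "do not
 * use any shallow file" (see the fetch-pack note in
 * is_repository_shallow()).  Must be called before the shallow file is
 * first parsed; the BUG() check below enforces that.
 */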
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
	if (r->parsed_objects->is_shallow != -1)
		BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
	if (r->parsed_objects->alternate_shallow_file && !override)
		return;
	free(r->parsed_objects->alternate_shallow_file);
	r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}

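/*
 * Record a commit as shallow by registering a graft with nr_parent set
 * to -1.  If the commit object has already been parsed, its parent
 * list is cleared so it appears parentless from now on.  Note that the
 * commit lookup still goes through the_repository.
 */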
int register_shallow(struct repository *r, const struct object_id *oid)
{
	struct commit_graft *graft =
		xmalloc(sizeof(struct commit_graft));
	struct commit *commit = lookup_commit(the_repository, oid);

	oidcpy(&graft->oid, oid);
	graft->nr_parent = -1;
	if (commit && commit->object.parsed)
		commit->parents = NULL;
	return register_commit_graft(r, graft, 0);
}

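/*
 * Lazily read the shallow file (one commit id in hex per line),
 * register each entry as a shallow graft and cache the answer in
 * parsed_objects->is_shallow.  The file's stat data is recorded so
 * that later modifications can be detected by
 * check_shallow_file_for_update().
 */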
int is_repository_shallow(struct repository *r)
{
	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow;

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' to indicate that no
	 * shallow file should be used. We could simply try to open it
	 * and let that fail, but an explicit check is clearer.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}

/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);
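/*
 * Walk down from the given heads, tracking each commit's depth in a
 * commit slab.  Commits that reach the requested depth limit (or that
 * are already recorded as shallow grafts) get shallow_flag and are
 * collected in the returned list; every commit above them gets
 * not_shallow_flag.
 */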
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
		int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				int **depth_slot;
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);
			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	for (i = 0; i < depths.slab_count; i++) {
		int j;

		if (!depths.slab[i])
			continue;
		for (j = 0; j < depths.slab_size; j++)
			free(depths.slab[i][j]);
	}
	clear_commit_depth(&depths);

	return result;
}

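/* traverse_commit_list() callback: collect every listed commit. */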
static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}

/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * is also returned.
 */
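/*
 * Callers such as upload-pack's deepen-since/deepen-not handling are
 * expected to pass ready-made rev-list arguments in av/ac.
 */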
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * Mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first and commit B, whose parent is
	 * A, is processed later. If NOT_SHALLOW were cleared from A
	 * while processing it, B would wrongly be considered a border
	 * commit when it is processed.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}

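/*
 * Die if .git/shallow has changed on disk since is_repository_shallow()
 * read it, using the stat data recorded at that time.
 */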
static void check_shallow_file_for_update(struct repository *r)
{
	if (r->parsed_objects->is_shallow == -1)
		BUG("shallow must be initialized by now");

	if (!stat_validity_check(r->parsed_objects->shallow_stat, git_path_shallow(the_repository)))
		die("shallow file has changed since we read it");
}

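/*
 * Flags for write_shallow_commits_1():
 *   SEEN_ONLY - keep only grafts whose commits carry the SEEN flag
 *   VERBOSE   - report entries that are being dropped
 *   QUICK     - keep only grafts whose objects still exist
 */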
#define SEEN_ONLY 1
#define VERBOSE 2
#define QUICK 4

struct write_shallow_data {
	struct strbuf *out;
	int use_pack_protocol;
	int count;
	unsigned flags;
};

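/*
 * for_each_commit_graft() callback: emit one shallow entry per graft,
 * subject to the flags above.
 */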
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
	struct write_shallow_data *data = cb_data;
	const char *hex = oid_to_hex(&graft->oid);
	if (graft->nr_parent != -1)
		return 0;
	if (data->flags & QUICK) {
		if (!has_object_file(&graft->oid))
			return 0;
	} else if (data->flags & SEEN_ONLY) {
		struct commit *c = lookup_commit(the_repository, &graft->oid);
		if (!c || !(c->object.flags & SEEN)) {
			if (data->flags & VERBOSE)
				/* use graft->oid; "c" may be NULL here */
				printf("Removing %s from .git/shallow\n",
				       oid_to_hex(&graft->oid));
			return 0;
		}
	}
	data->count++;
	if (data->use_pack_protocol)
		packet_buf_write(data->out, "shallow %s", hex);
	else {
		strbuf_addstr(data->out, hex);
		strbuf_addch(data->out, '\n');
	}
	return 0;
}

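/*
 * Write one entry per shallow graft (filtered by flags) to "out",
 * either as "shallow <oid>" pkt-lines or as plain hex lines, followed
 * by any "extra" oids as hex lines.  Returns the number of entries
 * written.
 */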
static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
				   const struct oid_array *extra,
				   unsigned flags)
{
	struct write_shallow_data data;
	int i;
	data.out = out;
	data.use_pack_protocol = use_pack_protocol;
	data.count = 0;
	data.flags = flags;
	for_each_commit_graft(write_one_shallow, &data);
	if (!extra)
		return data.count;
	for (i = 0; i < extra->nr; i++) {
		strbuf_addstr(out, oid_to_hex(extra->oid + i));
		strbuf_addch(out, '\n');
		data.count++;
	}
	return data.count;
}

int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}

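/*
 * Write the current shallow list plus "extra" to a temporary file and
 * return its path.  Returns "" when there is nothing to write, which
 * is_repository_shallow() treats as "no shallow file".
 */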
const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		temp = xmks_tempfile(git_path("shallow_XXXXXX"));

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}

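/*
 * Take the lock on .git/shallow, write out what the new shallow file
 * would contain, and point *alternate_shallow_file at the lockfile
 * path (or at "" if there is nothing to write).  Committing or rolling
 * back the lockfile is left to the caller.
 */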
void setup_alternate_shallow(struct lock_file *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(shallow_lock));
		*alternate_shallow_file = get_lock_file_path(shallow_lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}

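/*
 * Send one "shallow <oid>" pkt-line per registered shallow graft on
 * the given fd.  A no-op for non-shallow repositories.
 */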
static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
	int fd = *(int *)cb;
	if (graft->nr_parent == -1)
		packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
	return 0;
}

void advertise_shallow_grafts(int fd)
{
	if (!is_repository_shallow(the_repository))
		return;
	for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}

/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when PRUNE_QUICK is given,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
	struct lock_file shallow_lock = LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	if (options & PRUNE_SHOW_ONLY) {
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock));
		commit_lock_file(&shallow_lock);
	} else {
		unlink(git_path_shallow(the_repository));
		rollback_lock_file(&shallow_lock);
	}
	strbuf_release(&sb);
}

struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
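/*
 * "ours" holds indexes of shallow lines whose commits we already have
 * locally (and have not registered as shallow ourselves); "theirs"
 * holds indexes of shallow lines for commits we do not have yet.  Both
 * index into info->shallow.
 */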
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	int i;
	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (i = 0; i < sa->nr; i++) {
		if (has_object_file(sa->oid + i)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(the_repository,
						    &sa->oid[i]);
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}

void clear_shallow_info(struct shallow_info *info)
{
	free(info->ours);
	free(info->theirs);
}

/* Step 4, remove non-existent ones in "theirs" after getting the pack */

void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	int i, dst;
	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (has_object_file(oid + info->theirs[i]))
			dst++;
	}
	info->nr_theirs = dst;
}

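/*
 * Per-commit reachability bitmaps with one bit per ref, used by
 * paint_down().  The bitmaps are carved out of simple bump-allocated
 * pools so that everything can be freed in one sweep at the end of
 * assign_shallow_commits_to_refs().
 */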
define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

struct paint_info {
	struct ref_bitmap ref_bitmap;
	unsigned nr_bits;
	char **pools;
	char *free, *end;
	unsigned pool_count;
};

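/*
 * Hand out one bitmap (nr_bits bits, rounded up to whole uint32_t
 * words) from the current pool, opening a fresh POOL_SIZE pool when
 * the remainder is too small.
 */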
static uint32_t *paint_alloc(struct paint_info *info)
{
	unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
	unsigned size = nr * sizeof(uint32_t);
	void *p;
	if (!info->pool_count || size > info->end - info->free) {
		if (size > POOL_SIZE)
			BUG("pool size too small for %d in paint_alloc()",
			    size);
		info->pool_count++;
		REALLOC_ARRAY(info->pools, info->pool_count);
		info->free = xmalloc(POOL_SIZE);
		info->pools[info->pool_count - 1] = info->free;
		info->end = info->free + POOL_SIZE;
	}
	p = info->free;
	info->free += size;
	return p;
}

/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}

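/*
 * head_ref()/for_each_ref() callback: mark the commits our existing
 * refs point at (and, lazily, their ancestors) UNINTERESTING so that
 * paint_down() stops as soon as new refs connect to known history.
 */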
static int mark_uninteresting(const char *refname, const struct object_id *oid,
			      int flags, void *cb_data)
{
	struct commit *commit = lookup_commit_reference_gently(the_repository,
							       oid, 1);
	if (!commit)
		return 0;
	commit->object.flags |= UNINTERESTING;
	mark_parents_uninteresting(commit);
	return 0;
}

static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated, and if ref_status
 * is not NULL it's an array of ref->nr ints; ref_status[i] is true if
 * the i-th ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
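/*
 * Purely illustrative example of the "used" layout described above:
 * after a call with a non-NULL "used", a non-zero
 * (used[3][0] & (1U << 5)) means ref[5] needs the shallow commit at
 * index 3 of info->shallow.
 */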
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If they do not (e.g. forced ref
	 * updates), the walk has to go all the way down to the
	 * current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bounds */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * Unreachable shallow commits are not removed from
		 * "ours" and "theirs". The caller is expected to run
		 * step 7 on every ref separately and to stop trusting
		 * "ours" and "theirs".
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}

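/*
 * A flat array of the commits our current refs point at, filled by the
 * head_ref()/for_each_ref() callback add_ref() and used as the
 * "reachable from" set for the in_merge_bases_many() checks below.
 */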
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};

static int add_ref(const char *refname, const struct object_id *oid,
		   int flags, void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
							     oid, 1);
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}

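/* Bump ref_status[i] for every ref i whose bit is set in "bitmap". */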
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	unsigned int i;
	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}

/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}

/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(the_repository,
						      &si->shallow->oid[c]);

		if (!si->commits) {
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			head_ref(add_ref, &ca);
			for_each_ref(add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = in_merge_bases_many(commit,
						       si->nr_commits,
						       si->commits);
		si->need_reachability_test[c] = 0;
	}
	return si->reachable[c];
}