/*
 * shallow.c: support for shallow repositories -- reading and writing
 * the .git/shallow file and managing shallow commit "grafts".
 */
1 #include "cache.h"
2 #include "repository.h"
3 #include "tempfile.h"
4 #include "lockfile.h"
5 #include "object-store.h"
6 #include "commit.h"
7 #include "tag.h"
8 #include "pkt-line.h"
9 #include "remote.h"
10 #include "refs.h"
11 #include "sha1-array.h"
12 #include "diff.h"
13 #include "revision.h"
14 #include "commit-slab.h"
15 #include "revision.h"
16 #include "list-objects.h"
17 #include "commit-slab.h"
18 #include "repository.h"
19 #include "commit-reach.h"
20
21 void set_alternate_shallow_file(struct repository *r, const char *path, int override)
22 {
23 if (r->parsed_objects->is_shallow != -1)
24 BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
25 if (r->parsed_objects->alternate_shallow_file && !override)
26 return;
27 free(r->parsed_objects->alternate_shallow_file);
28 r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
29 }
30
31 int register_shallow(struct repository *r, const struct object_id *oid)
32 {
33 struct commit_graft *graft =
34 xmalloc(sizeof(struct commit_graft));
35 struct commit *commit = lookup_commit(the_repository, oid);
36
37 oidcpy(&graft->oid, oid);
38 graft->nr_parent = -1;
39 if (commit && commit->object.parsed)
40 commit->parents = NULL;
41 return register_commit_graft(r, graft, 0);
42 }
43
/*
 * Lazily determine whether the repository is shallow. On first use the
 * shallow file is read and each of its entries registered as a
 * parent-less graft; the result (0 or 1) is cached in
 * r->parsed_objects->is_shallow.
 *
 * NEEDSWORK: This function updates
 * r->parsed_objects->{is_shallow,shallow_stat} as a side effect but
 * there is no corresponding function to clear them when the shallow
 * file is updated.
 */
int is_repository_shallow(struct repository *r)
{
	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	/* answered before? use the cached value */
	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow;

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	/* remember the file's stat data so later changes can be detected */
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	/* one hex object id per line; register each as a shallow graft */
	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}
84
85 /*
86 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
87 * supports a "valid" flag.
88 */
/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);

/*
 * Walk down from each of "heads", counting generation depth, and
 * collect the commits at which the walk stops -- either because
 * "depth" was reached or because an existing shallow boundary (a
 * parent-less graft) was hit. The stopping commits are flagged with
 * shallow_flag and returned as a list; every commit walked through is
 * flagged with not_shallow_flag.
 */
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
		int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	/*
	 * Depth-first walk: "commit" is the node currently being
	 * visited, "stack" holds postponed siblings, and "heads" feeds
	 * in new start points once both are exhausted.
	 */
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				int **depth_slot;
				/* start a fresh walk at the next head, depth 0 */
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				/* resume a sibling postponed earlier */
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		/*
		 * Stop here if the requested depth is reached, or if
		 * this commit is already a shallow boundary (a graft
		 * with no recorded parents).
		 */
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);
			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				/* already reached at a shallower depth; skip */
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			/* descend into the last parent now, postpone the rest */
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	/* free the per-commit depth allocations made above */
	for (i = 0; i < depths.slab_count; i++) {
		int j;

		if (!depths.slab[i])
			continue;
		for (j = 0; j < depths.slab_size; j++)
			free(depths.slab[i][j]);
	}
	clear_commit_depth(&depths);

	return result;
}
168
/* traverse_commit_list() callback: prepend "commit" to the list at *data. */
static void show_commit(struct commit *commit, void *data)
{
	struct commit_list **list = data;

	commit_list_insert(commit, list);
}
173
174 /*
175 * Given rev-list arguments, run rev-list. All reachable commits
176 * except border ones are marked with not_shallow_flag. Border commits
177 * are marked with shallow_flag. The list of border/shallow commits
178 * are also returned.
179 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	/* run the rev-list described by av[]; collect commits via show_commit() */
	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		/* a commit with any unreachable parent is a border commit */
		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}
247
248 static void check_shallow_file_for_update(struct repository *r)
249 {
250 if (r->parsed_objects->is_shallow == -1)
251 BUG("shallow must be initialized by now");
252
253 if (!stat_validity_check(r->parsed_objects->shallow_stat,
254 git_path_shallow(r)))
255 die("shallow file has changed since we read it");
256 }
257
/* Flags for write_shallow_commits_1() (see prune_shallow()): */
#define SEEN_ONLY 1	/* keep only entries whose commit is flagged SEEN */
#define VERBOSE 2	/* print a message for each dropped entry */
#define QUICK 4		/* keep only entries whose object still exists */

/* Callback state shared with write_one_shallow(). */
struct write_shallow_data {
	struct strbuf *out;	/* accumulated shallow lines */
	int use_pack_protocol;	/* emit pkt-lines instead of plain lines */
	int count;		/* number of entries written so far */
	unsigned flags;		/* SEEN_ONLY | VERBOSE | QUICK */
};
268
269 static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
270 {
271 struct write_shallow_data *data = cb_data;
272 const char *hex = oid_to_hex(&graft->oid);
273 if (graft->nr_parent != -1)
274 return 0;
275 if (data->flags & QUICK) {
276 if (!has_object_file(&graft->oid))
277 return 0;
278 } else if (data->flags & SEEN_ONLY) {
279 struct commit *c = lookup_commit(the_repository, &graft->oid);
280 if (!c || !(c->object.flags & SEEN)) {
281 if (data->flags & VERBOSE)
282 printf("Removing %s from .git/shallow\n",
283 oid_to_hex(&c->object.oid));
284 return 0;
285 }
286 }
287 data->count++;
288 if (data->use_pack_protocol)
289 packet_buf_write(data->out, "shallow %s", hex);
290 else {
291 strbuf_addstr(data->out, hex);
292 strbuf_addch(data->out, '\n');
293 }
294 return 0;
295 }
296
297 static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
298 const struct oid_array *extra,
299 unsigned flags)
300 {
301 struct write_shallow_data data;
302 int i;
303 data.out = out;
304 data.use_pack_protocol = use_pack_protocol;
305 data.count = 0;
306 data.flags = flags;
307 for_each_commit_graft(write_one_shallow, &data);
308 if (!extra)
309 return data.count;
310 for (i = 0; i < extra->nr; i++) {
311 strbuf_addstr(out, oid_to_hex(extra->oid + i));
312 strbuf_addch(out, '\n');
313 data.count++;
314 }
315 return data.count;
316 }
317
/* Like write_shallow_commits_1(), with no filtering flags. */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	unsigned no_flags = 0;

	return write_shallow_commits_1(out, use_pack_protocol, extra, no_flags);
}
323
324 const char *setup_temporary_shallow(const struct oid_array *extra)
325 {
326 struct tempfile *temp;
327 struct strbuf sb = STRBUF_INIT;
328
329 if (write_shallow_commits(&sb, 0, extra)) {
330 temp = xmks_tempfile(git_path("shallow_XXXXXX"));
331
332 if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
333 close_tempfile_gently(temp) < 0)
334 die_errno("failed to write to %s",
335 get_tempfile_path(temp));
336 strbuf_release(&sb);
337 return get_tempfile_path(temp);
338 }
339 /*
340 * is_repository_shallow() sees empty string as "no shallow
341 * file".
342 */
343 return "";
344 }
345
/*
 * Write the current shallow state (plus "extra", if any) to a locked
 * copy of .git/shallow and point *alternate_shallow_file at the lock
 * file's path. The lock is left held; the caller is responsible for
 * committing or rolling it back. If there is nothing shallow to
 * record, *alternate_shallow_file is set to "" instead.
 */
void setup_alternate_shallow(struct lock_file *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	/* take the lock, and make sure nobody changed the file under us */
	fd = hold_lock_file_for_update(shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(shallow_lock));
		*alternate_shallow_file = get_lock_file_path(shallow_lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}
370
371 static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
372 {
373 int fd = *(int *)cb;
374 if (graft->nr_parent == -1)
375 packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
376 return 0;
377 }
378
379 void advertise_shallow_grafts(int fd)
380 {
381 if (!is_repository_shallow(the_repository))
382 return;
383 for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
384 }
385
386 /*
387 * mark_reachable_objects() should have been run prior to this and all
388 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
389 * in which case lines are excised from the shallow file if they refer to
390 * commits that do not exist (any longer).
391 */
void prune_shallow(unsigned options)
{
	struct lock_file shallow_lock = LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	if (options & PRUNE_SHOW_ONLY) {
		/* dry run: report what would be removed, touch nothing */
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	/* rewrite .git/shallow under lock with only the surviving entries */
	fd = hold_lock_file_for_update(&shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock));
		commit_lock_file(&shallow_lock);
	} else {
		/* nothing survives; the repository is no longer shallow */
		unlink(git_path_shallow(the_repository));
		rollback_lock_file(&shallow_lock);
	}
	strbuf_release(&sb);
}
423
/* Trace key enabling GIT_TRACE_SHALLOW debugging output. */
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);
425
426 /*
427 * Step 1, split sender shallow commits into "ours" and "theirs"
428 * Step 2, clean "ours" based on .git/shallow
429 */
430 void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
431 {
432 int i;
433 trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
434 memset(info, 0, sizeof(*info));
435 info->shallow = sa;
436 if (!sa)
437 return;
438 ALLOC_ARRAY(info->ours, sa->nr);
439 ALLOC_ARRAY(info->theirs, sa->nr);
440 for (i = 0; i < sa->nr; i++) {
441 if (has_object_file(sa->oid + i)) {
442 struct commit_graft *graft;
443 graft = lookup_commit_graft(the_repository,
444 &sa->oid[i]);
445 if (graft && graft->nr_parent < 0)
446 continue;
447 info->ours[info->nr_ours++] = i;
448 } else
449 info->theirs[info->nr_theirs++] = i;
450 }
451 }
452
453 void clear_shallow_info(struct shallow_info *info)
454 {
455 free(info->ours);
456 free(info->theirs);
457 }
458
459 /* Step 4, remove non-existent ones in "theirs" after getting the pack */
460
461 void remove_nonexistent_theirs_shallow(struct shallow_info *info)
462 {
463 struct object_id *oid = info->shallow->oid;
464 int i, dst;
465 trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
466 for (i = dst = 0; i < info->nr_theirs; i++) {
467 if (i != dst)
468 info->theirs[dst] = info->theirs[i];
469 if (has_object_file(oid + info->theirs[i]))
470 dst++;
471 }
472 info->nr_theirs = dst;
473 }
474
/* One bitmap per commit: bit i set means ref i can reach the commit. */
define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

/* State for paint_down(); bitmaps are carved out of append-only pools. */
struct paint_info {
	struct ref_bitmap ref_bitmap;	/* commit -> reaching-refs bitmap */
	unsigned nr_bits;		/* number of refs = bits per bitmap */
	char **pools;			/* every pool chunk, for bulk freeing */
	char *free, *end;		/* unused region of the current chunk */
	unsigned pool_count;
};
486
487 static uint32_t *paint_alloc(struct paint_info *info)
488 {
489 unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
490 unsigned size = nr * sizeof(uint32_t);
491 void *p;
492 if (!info->pool_count || size > info->end - info->free) {
493 if (size > POOL_SIZE)
494 BUG("pool size too small for %d in paint_alloc()",
495 size);
496 info->pool_count++;
497 REALLOC_ARRAY(info->pools, info->pool_count);
498 info->free = xmalloc(POOL_SIZE);
499 info->pools[info->pool_count - 1] = info->free;
500 info->end = info->free + POOL_SIZE;
501 }
502 p = info->free;
503 info->free += size;
504 return p;
505 }
506
507 /*
508 * Given a commit SHA-1, walk down to parents until either SEEN,
509 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
510 * all walked commits.
511 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));	/* this walk paints bit "id" */
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			/*
			 * OR this walk's bitmap into the commit's
			 * existing one; allocate a new bitmap only when
			 * the merge actually changed something.
			 */
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		/* do not walk past a shallow boundary */
		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	/* clear SEEN everywhere so the next paint_down() starts fresh */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}
578
579 static int mark_uninteresting(const char *refname, const struct object_id *oid,
580 int flags, void *cb_data)
581 {
582 struct commit *commit = lookup_commit_reference_gently(the_repository,
583 oid, 1);
584 if (!commit)
585 return 0;
586 commit->object.flags |= UNINTERESTING;
587 mark_parents_uninteresting(commit);
588 return 0;
589 }
590
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	/* gather the indices of all shallow commits, ours and theirs */
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	/* paint each ref's bit onto every commit that ref can reach */
	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		/* copy each shallow commit's bitmap out of the pools */
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}
684
/* A growable array of commits, populated by add_ref(). */
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};
689
690 static int add_ref(const char *refname, const struct object_id *oid,
691 int flags, void *cb_data)
692 {
693 struct commit_array *ca = cb_data;
694 ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
695 ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
696 oid, 1);
697 if (ca->commits[ca->nr])
698 ca->nr++;
699 return 0;
700 }
701
/*
 * For each of the first "nr" bits set in "bitmap", bump the
 * corresponding ref_status counter. A NULL ref_status is a no-op.
 */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	unsigned int i;

	if (!ref_status)
		return;
	for (i = 0; i < nr; i++) {
		uint32_t word = bitmap[i / 32];

		if (word & (1U << (i % 32)))
			ref_status[i]++;
	}
}
711
712 /*
713 * Step 7, reachability test on "ours" at commit level
714 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		/* kept if any ref reaches it, i.e. any bit is set */
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	/* collect the commits at the tips of all existing refs */
	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}
772
773 /* (Delayed) step 7, reachability test at commit level */
774 int delayed_reachability_test(struct shallow_info *si, int c)
775 {
776 if (si->need_reachability_test[c]) {
777 struct commit *commit = lookup_commit(the_repository,
778 &si->shallow->oid[c]);
779
780 if (!si->commits) {
781 struct commit_array ca;
782
783 memset(&ca, 0, sizeof(ca));
784 head_ref(add_ref, &ca);
785 for_each_ref(add_ref, &ca);
786 si->commits = ca.commits;
787 si->nr_commits = ca.nr;
788 }
789
790 si->reachable[c] = in_merge_bases_many(commit,
791 si->nr_commits,
792 si->commits);
793 si->need_reachability_test[c] = 0;
794 }
795 return si->reachable[c];
796 }