]>
Commit | Line | Data |
---|---|---|
1 | #include "cache.h" | |
2 | #include "repository.h" | |
3 | #include "tempfile.h" | |
4 | #include "lockfile.h" | |
5 | #include "object-store.h" | |
6 | #include "commit.h" | |
7 | #include "tag.h" | |
8 | #include "pkt-line.h" | |
9 | #include "remote.h" | |
10 | #include "refs.h" | |
11 | #include "sha1-array.h" | |
12 | #include "diff.h" | |
13 | #include "revision.h" | |
14 | #include "commit-slab.h" | |
15 | #include "revision.h" | |
16 | #include "list-objects.h" | |
17 | #include "commit-slab.h" | |
18 | #include "repository.h" | |
19 | #include "commit-reach.h" | |
20 | ||
/*
 * Use "path" instead of the default ".git/shallow" for all later
 * shallow-file reads in this repository.  Must be called before the
 * shallow state is first read (i.e. before is_repository_shallow());
 * "override" forces replacement of a previously set alternate path.
 */
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
	if (r->parsed_objects->is_shallow != -1)
		BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
	if (r->parsed_objects->alternate_shallow_file && !override)
		return;
	free(r->parsed_objects->alternate_shallow_file);
	/* path == NULL resets back to the default shallow file */
	r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}
30 | ||
31 | int register_shallow(struct repository *r, const struct object_id *oid) | |
32 | { | |
33 | struct commit_graft *graft = | |
34 | xmalloc(sizeof(struct commit_graft)); | |
35 | struct commit *commit = lookup_commit(the_repository, oid); | |
36 | ||
37 | oidcpy(&graft->oid, oid); | |
38 | graft->nr_parent = -1; | |
39 | if (commit && commit->object.parsed) | |
40 | commit->parents = NULL; | |
41 | return register_commit_graft(r, graft, 0); | |
42 | } | |
43 | ||
/*
 * Return 1 if the repository is shallow (has boundary commits listed
 * in its shallow file), 0 otherwise.  The first call reads the shallow
 * file -- or the alternate set via set_alternate_shallow_file() -- and
 * registers every listed commit as a parentless graft; the answer is
 * then cached in r->parsed_objects->is_shallow.
 */
int is_repository_shallow(struct repository *r)
{
	/*
	 * NEEDSWORK: This function updates
	 * r->parsed_objects->{is_shallow,shallow_stat} as a side effect but
	 * there is no corresponding function to clear them when the shallow
	 * file is updated.
	 */

	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	/* Cached answer from an earlier call? */
	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow;

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	/* Remember the file's identity so later modifications are detected. */
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	/* One hex object id per line; each becomes a shallow graft. */
	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}
84 | ||
/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);

/*
 * Walk from each head and compute the shallow boundary at "depth"
 * (commits counted inclusive of the head; INFINITE_DEPTH disables the
 * cutoff).  Boundary commits are returned in a list and marked with
 * shallow_flag; every commit strictly inside the boundary is marked
 * with not_shallow_flag.  The depth slab keeps, per commit, the
 * shallowest depth at which it has been reached so far, so a commit
 * reachable by several paths is expanded again only when a shorter
 * path to it is found.
 */
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
		int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			/* Pick the next start: an unprocessed head first ... */
			if (i < heads->nr) {
				int **depth_slot;
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				/* ... then backtrack to a deferred parent. */
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		/*
		 * Boundary: either we hit the requested depth, or (in an
		 * already-shallow repository) we hit an existing shallow
		 * graft, whose parent chain is cut anyway.
		 */
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		/* Continue along one parent; push the others for later. */
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);
			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				/* Already reached by a path at least as short. */
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	/* Free the per-commit depth cells before dropping the slab. */
	for (i = 0; i < depths.slab_count; i++) {
		int j;

		for (j = 0; j < depths.slab_size; j++)
			free(depths.slab[i][j]);
	}
	clear_commit_depth(&depths);

	return result;
}
166 | ||
/* rev-list callback: collect each shown commit onto the caller's list. */
static void show_commit(struct commit *commit, void *data)
{
	struct commit_list **list = data;

	commit_list_insert(commit, list);
}
171 | ||
/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * are also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	/* Step 1: gather every reachable commit. */
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		/* A border commit has at least one unreachable parent. */
		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}
245 | ||
246 | static void check_shallow_file_for_update(struct repository *r) | |
247 | { | |
248 | if (r->parsed_objects->is_shallow == -1) | |
249 | BUG("shallow must be initialized by now"); | |
250 | ||
251 | if (!stat_validity_check(r->parsed_objects->shallow_stat, git_path_shallow(the_repository))) | |
252 | die("shallow file has changed since we read it"); | |
253 | } | |
254 | ||
/* Flags for write_shallow_commits_1() / write_one_shallow(). */
#define SEEN_ONLY 1	/* emit only commits flagged SEEN */
#define VERBOSE 2	/* report entries that get dropped */
#define QUICK 4		/* emit only commits whose object exists */

/* State threaded through the for_each_commit_graft() callback. */
struct write_shallow_data {
	struct strbuf *out;	/* accumulated output */
	int use_pack_protocol;	/* pkt-line framing instead of plain lines */
	int count;		/* number of entries written */
	unsigned flags;		/* SEEN_ONLY / VERBOSE / QUICK */
};
265 | ||
266 | static int write_one_shallow(const struct commit_graft *graft, void *cb_data) | |
267 | { | |
268 | struct write_shallow_data *data = cb_data; | |
269 | const char *hex = oid_to_hex(&graft->oid); | |
270 | if (graft->nr_parent != -1) | |
271 | return 0; | |
272 | if (data->flags & QUICK) { | |
273 | if (!has_object_file(&graft->oid)) | |
274 | return 0; | |
275 | } else if (data->flags & SEEN_ONLY) { | |
276 | struct commit *c = lookup_commit(the_repository, &graft->oid); | |
277 | if (!c || !(c->object.flags & SEEN)) { | |
278 | if (data->flags & VERBOSE) | |
279 | printf("Removing %s from .git/shallow\n", | |
280 | oid_to_hex(&c->object.oid)); | |
281 | return 0; | |
282 | } | |
283 | } | |
284 | data->count++; | |
285 | if (data->use_pack_protocol) | |
286 | packet_buf_write(data->out, "shallow %s", hex); | |
287 | else { | |
288 | strbuf_addstr(data->out, hex); | |
289 | strbuf_addch(data->out, '\n'); | |
290 | } | |
291 | return 0; | |
292 | } | |
293 | ||
294 | static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol, | |
295 | const struct oid_array *extra, | |
296 | unsigned flags) | |
297 | { | |
298 | struct write_shallow_data data; | |
299 | int i; | |
300 | data.out = out; | |
301 | data.use_pack_protocol = use_pack_protocol; | |
302 | data.count = 0; | |
303 | data.flags = flags; | |
304 | for_each_commit_graft(write_one_shallow, &data); | |
305 | if (!extra) | |
306 | return data.count; | |
307 | for (i = 0; i < extra->nr; i++) { | |
308 | strbuf_addstr(out, oid_to_hex(extra->oid + i)); | |
309 | strbuf_addch(out, '\n'); | |
310 | data.count++; | |
311 | } | |
312 | return data.count; | |
313 | } | |
314 | ||
/*
 * Convenience wrapper: write every shallow graft plus "extra" with no
 * filtering flags.  Returns the number of entries written.
 */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}
320 | ||
/*
 * Write the current shallow state (plus "extra") to a temporary file
 * and return its path, for use as an alternate shallow file.  Returns
 * "" when there is nothing shallow to record -- is_repository_shallow()
 * treats the empty string as "no shallow file".  The returned path
 * belongs to the tempfile machinery; do not free it.
 */
const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		temp = xmks_tempfile(git_path("shallow_XXXXXX"));

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}
342 | ||
/*
 * Take the shallow-file lock, verify the on-disk file has not changed
 * since we read it, and write the current shallow state (plus "extra")
 * into the lockfile.  On return *alternate_shallow_file points at the
 * lockfile path, or at "" when there is nothing shallow to record
 * (is_repository_shallow() treats "" as "no shallow file").  The lock
 * is left held; the caller commits or rolls it back.
 */
void setup_alternate_shallow(struct lock_file *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(shallow_lock));
		*alternate_shallow_file = get_lock_file_path(shallow_lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}
367 | ||
368 | static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb) | |
369 | { | |
370 | int fd = *(int *)cb; | |
371 | if (graft->nr_parent == -1) | |
372 | packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid)); | |
373 | return 0; | |
374 | } | |
375 | ||
/*
 * Send one "shallow <oid>" pkt-line per shallow graft over "fd".
 * No-op when the repository is not shallow.
 */
void advertise_shallow_grafts(int fd)
{
	if (!is_repository_shallow(the_repository))
		return;
	for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}
382 | ||
/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
	struct lock_file shallow_lock = LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	/* Dry run: report what would be removed, touch nothing. */
	if (options & PRUNE_SHOW_ONLY) {
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock));
		commit_lock_file(&shallow_lock);
	} else {
		/* Nothing left to keep: remove the shallow file entirely. */
		unlink(git_path_shallow(the_repository));
		rollback_lock_file(&shallow_lock);
	}
	strbuf_release(&sb);
}
420 | ||
421 | struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW); | |
422 | ||
/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	int i;
	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	/* ours/theirs hold indices into sa; each entry lands in one list. */
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (i = 0; i < sa->nr; i++) {
		if (has_object_file(sa->oid + i)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(the_repository,
						    &sa->oid[i]);
			/* Already a shallow boundary here: nothing new. */
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}
449 | ||
450 | void clear_shallow_info(struct shallow_info *info) | |
451 | { | |
452 | free(info->ours); | |
453 | free(info->theirs); | |
454 | } | |
455 | ||
/* Step 4, remove non-existent ones in "theirs" after getting the pack */

void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	int i, dst;
	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	/* In-place compaction: keep only entries whose object now exists. */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (has_object_file(oid + info->theirs[i]))
			dst++;
	}
	info->nr_theirs = dst;
}
471 | ||
/* Per-commit bitmap: bit n set means ref n can reach this commit. */
define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

/* State for paint_down(): ref bitmaps plus a bump allocator for them. */
struct paint_info {
	struct ref_bitmap ref_bitmap;	/* commit -> reachable-refs bitmap */
	unsigned nr_bits;		/* number of refs == bits per bitmap */
	char **pools;			/* all allocated pools, for freeing */
	char *free, *end;		/* unused region of the current pool */
	unsigned pool_count;
};
483 | ||
484 | static uint32_t *paint_alloc(struct paint_info *info) | |
485 | { | |
486 | unsigned nr = DIV_ROUND_UP(info->nr_bits, 32); | |
487 | unsigned size = nr * sizeof(uint32_t); | |
488 | void *p; | |
489 | if (!info->pool_count || size > info->end - info->free) { | |
490 | if (size > POOL_SIZE) | |
491 | BUG("pool size too small for %d in paint_alloc()", | |
492 | size); | |
493 | info->pool_count++; | |
494 | REALLOC_ARRAY(info->pools, info->pool_count); | |
495 | info->free = xmalloc(POOL_SIZE); | |
496 | info->pools[info->pool_count - 1] = info->free; | |
497 | info->end = info->free + POOL_SIZE; | |
498 | } | |
499 | p = info->free; | |
500 | info->free += size; | |
501 | return p; | |
502 | } | |
503 | ||
/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	/* The bitmap carrying only bit "id"; shared by all commits below. */
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			/*
			 * Commit already reached by other refs: merge bits.
			 * Copy-on-write -- allocate a fresh bitmap only when
			 * the merge actually changes the existing one.
			 */
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	/* Clear SEEN so the next paint_down() starts fresh. */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}
575 | ||
576 | static int mark_uninteresting(const char *refname, const struct object_id *oid, | |
577 | int flags, void *cb_data) | |
578 | { | |
579 | struct commit *commit = lookup_commit_reference_gently(the_repository, | |
580 | oid, 1); | |
581 | if (!commit) | |
582 | return 0; | |
583 | commit->object.flags |= UNINTERESTING; | |
584 | mark_parents_uninteresting(commit); | |
585 | return 0; | |
586 | } | |
587 | ||
588 | static void post_assign_shallow(struct shallow_info *info, | |
589 | struct ref_bitmap *ref_bitmap, | |
590 | int *ref_status); | |
591 | /* | |
592 | * Step 6(+7), associate shallow commits with new refs | |
593 | * | |
594 | * info->ref must be initialized before calling this function. | |
595 | * | |
596 | * If used is not NULL, it's an array of info->shallow->nr | |
597 | * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the | |
598 | * m-th shallow commit from info->shallow. | |
599 | * | |
600 | * If used is NULL, "ours" and "theirs" are updated. And if ref_status | |
601 | * is not NULL it's an array of ref->nr ints. ref_status[i] is true if | |
602 | * the ref needs some shallow commits from either info->ours or | |
603 | * info->theirs. | |
604 | */ | |
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	/* Collect the indices of all candidate shallow commits. */
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	/* Paint each ref's reachability bit onto the commits below it. */
	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}
681 | ||
/* Growable array of commits, filled by the add_ref() callback. */
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};
686 | ||
/*
 * for_each_ref() callback: append the commit a ref peels to onto the
 * commit_array in cb_data; refs that do not point at commits are
 * silently skipped.
 */
static int add_ref(const char *refname, const struct object_id *oid,
		   int flags, void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
							     oid, 1);
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}
698 | ||
/*
 * Increment ref_status[i] for every bit i set in "bitmap"; no-op when
 * ref_status is NULL.  "nr" is the number of valid bits.
 *
 * Fix: the loop counter was "unsigned int" while "nr" is int; the
 * signed/unsigned comparison would turn a negative nr into a huge
 * bound.  Use a matching int counter.
 */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	int i;
	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}
708 | ||
/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		/* Keep the entry if any ref's bit is set for this commit. */
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	/* Gather existing ref tips for the merge-base reachability test. */
	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}
769 | ||
/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(the_repository,
						      &si->shallow->oid[c]);

		/* Lazily collect the ref tips once, then reuse them. */
		if (!si->commits) {
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			head_ref(add_ref, &ca);
			for_each_ref(add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = in_merge_bases_many(commit,
						       si->nr_commits,
						       si->commits);
		/* Cache the answer so the test runs at most once per entry. */
		si->need_reachability_test[c] = 0;
	}
	return si->reachable[c];
}