/*
 * Shallow repository support: reading/writing the $GIT_DIR/shallow
 * file and computing/assigning shallow boundaries.
 */
1 | #include "cache.h" | |
2 | #include "repository.h" | |
3 | #include "tempfile.h" | |
4 | #include "lockfile.h" | |
5 | #include "object-store.h" | |
6 | #include "commit.h" | |
7 | #include "tag.h" | |
8 | #include "pkt-line.h" | |
9 | #include "remote.h" | |
10 | #include "refs.h" | |
11 | #include "oid-array.h" | |
12 | #include "diff.h" | |
13 | #include "revision.h" | |
14 | #include "commit-slab.h" | |
15 | #include "list-objects.h" | |
16 | #include "commit-reach.h" | |
17 | #include "shallow.h" | |
18 | ||
19 | void set_alternate_shallow_file(struct repository *r, const char *path, int override) | |
20 | { | |
21 | if (r->parsed_objects->is_shallow != -1) | |
22 | BUG("is_repository_shallow must not be called before set_alternate_shallow_file"); | |
23 | if (r->parsed_objects->alternate_shallow_file && !override) | |
24 | return; | |
25 | free(r->parsed_objects->alternate_shallow_file); | |
26 | r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path); | |
27 | } | |
28 | ||
29 | int register_shallow(struct repository *r, const struct object_id *oid) | |
30 | { | |
31 | struct commit_graft *graft = | |
32 | xmalloc(sizeof(struct commit_graft)); | |
33 | struct commit *commit = lookup_commit(the_repository, oid); | |
34 | ||
35 | oidcpy(&graft->oid, oid); | |
36 | graft->nr_parent = -1; | |
37 | if (commit && commit->object.parsed) | |
38 | commit->parents = NULL; | |
39 | return register_commit_graft(r, graft, 0); | |
40 | } | |
41 | ||
42 | int unregister_shallow(const struct object_id *oid) | |
43 | { | |
44 | int pos = commit_graft_pos(the_repository, oid->hash); | |
45 | if (pos < 0) | |
46 | return -1; | |
47 | if (pos + 1 < the_repository->parsed_objects->grafts_nr) | |
48 | MOVE_ARRAY(the_repository->parsed_objects->grafts + pos, | |
49 | the_repository->parsed_objects->grafts + pos + 1, | |
50 | the_repository->parsed_objects->grafts_nr - pos - 1); | |
51 | the_repository->parsed_objects->grafts_nr--; | |
52 | return 0; | |
53 | } | |
54 | ||
/*
 * Return 1 if "r" is a shallow repository, 0 otherwise.  The answer is
 * cached in r->parsed_objects->is_shallow (-1 means "not determined
 * yet").  As a side effect of the first call, every commit listed in
 * the shallow file is registered as a parentless graft via
 * register_shallow(), and the file's stat data is remembered so later
 * modifications can be detected.
 */
int is_repository_shallow(struct repository *r)
{
	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	/* Cached answer from a previous call? */
	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow;

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	/* One hex object id per line; anything unparsable is fatal. */
	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}
88 | ||
89 | static void reset_repository_shallow(struct repository *r) | |
90 | { | |
91 | r->parsed_objects->is_shallow = -1; | |
92 | stat_validity_clear(r->parsed_objects->shallow_stat); | |
93 | } | |
94 | ||
95 | int commit_shallow_file(struct repository *r, struct shallow_lock *lk) | |
96 | { | |
97 | int res = commit_lock_file(&lk->lock); | |
98 | reset_repository_shallow(r); | |
99 | return res; | |
100 | } | |
101 | ||
102 | void rollback_shallow_file(struct repository *r, struct shallow_lock *lk) | |
103 | { | |
104 | rollback_lock_file(&lk->lock); | |
105 | reset_repository_shallow(r); | |
106 | } | |
107 | ||
/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);

/*
 * Walk history from "heads" and return the commits that form the
 * shallow boundary when history is cut at "depth" (a count of
 * commits, so depth == 1 keeps only the heads themselves).  Boundary
 * commits are flagged with shallow_flag and returned in a list;
 * every commit above the boundary is flagged with not_shallow_flag.
 *
 * The traversal is depth-first and records, per commit, the smallest
 * depth at which the commit has been reached (in the commit_depth
 * slab), so a commit reachable over several paths is cut at its
 * minimum depth.
 */
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
		int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	/*
	 * "commit" is the commit currently being visited; when NULL,
	 * pick the next start point from "heads", or a deferred parent
	 * from "stack".
	 */
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				int **depth_slot;
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				/* Skip heads that do not peel to a commit. */
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		/*
		 * Cut here if the requested depth is reached, or if we
		 * hit an existing shallow boundary (a parentless graft)
		 * in an already-shallow repository.
		 */
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		/*
		 * Visit parents whose recorded depth we improve: the
		 * last such parent is followed immediately (kept in
		 * "commit"), earlier ones are pushed onto "stack".
		 */
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);
			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				/* Already reached at a shallower depth. */
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	/* Free the per-commit depth allocations before the slab itself. */
	for (i = 0; i < depths.slab_count; i++) {
		int j;

		if (!depths.slab[i])
			continue;
		for (j = 0; j < depths.slab_size; j++)
			free(depths.slab[i][j]);
	}
	clear_commit_depth(&depths);

	return result;
}
191 | ||
/* traverse_commit_list() callback: collect each commit into a list. */
static void show_commit(struct commit *commit, void *data)
{
	struct commit_list **list = data;

	commit_list_insert(commit, list);
}
196 | ||
/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * are also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	/* Collect every commit the rev-list arguments select. */
	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		/*
		 * A commit is "border" when at least one parent was
		 * not selected by the walk above.
		 */
		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	/* NOTE(review): "revs" is not released here — confirm whether
	 * a release_revisions()-style cleanup is available/needed. */
	return result;
}
270 | ||
271 | static void check_shallow_file_for_update(struct repository *r) | |
272 | { | |
273 | if (r->parsed_objects->is_shallow == -1) | |
274 | BUG("shallow must be initialized by now"); | |
275 | ||
276 | if (!stat_validity_check(r->parsed_objects->shallow_stat, | |
277 | git_path_shallow(r))) | |
278 | die("shallow file has changed since we read it"); | |
279 | } | |
280 | ||
/* Filtering flags for write_shallow_commits_1()/write_one_shallow(). */
#define SEEN_ONLY 1	/* keep only grafts whose commit is marked SEEN */
#define VERBOSE 2	/* with SEEN_ONLY: report entries being dropped */
#define QUICK 4		/* keep only grafts whose object still exists */

/* Callback state shared across write_one_shallow() invocations. */
struct write_shallow_data {
	struct strbuf *out;	/* destination buffer for the entries */
	int use_pack_protocol;	/* emit pkt-lines instead of plain lines */
	int count;		/* number of entries written so far */
	unsigned flags;		/* SEEN_ONLY | VERBOSE | QUICK */
};
291 | ||
292 | static int write_one_shallow(const struct commit_graft *graft, void *cb_data) | |
293 | { | |
294 | struct write_shallow_data *data = cb_data; | |
295 | const char *hex = oid_to_hex(&graft->oid); | |
296 | if (graft->nr_parent != -1) | |
297 | return 0; | |
298 | if (data->flags & QUICK) { | |
299 | if (!has_object_file(&graft->oid)) | |
300 | return 0; | |
301 | } else if (data->flags & SEEN_ONLY) { | |
302 | struct commit *c = lookup_commit(the_repository, &graft->oid); | |
303 | if (!c || !(c->object.flags & SEEN)) { | |
304 | if (data->flags & VERBOSE) | |
305 | printf("Removing %s from .git/shallow\n", | |
306 | oid_to_hex(&c->object.oid)); | |
307 | return 0; | |
308 | } | |
309 | } | |
310 | data->count++; | |
311 | if (data->use_pack_protocol) | |
312 | packet_buf_write(data->out, "shallow %s", hex); | |
313 | else { | |
314 | strbuf_addstr(data->out, hex); | |
315 | strbuf_addch(data->out, '\n'); | |
316 | } | |
317 | return 0; | |
318 | } | |
319 | ||
320 | static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol, | |
321 | const struct oid_array *extra, | |
322 | unsigned flags) | |
323 | { | |
324 | struct write_shallow_data data; | |
325 | int i; | |
326 | data.out = out; | |
327 | data.use_pack_protocol = use_pack_protocol; | |
328 | data.count = 0; | |
329 | data.flags = flags; | |
330 | for_each_commit_graft(write_one_shallow, &data); | |
331 | if (!extra) | |
332 | return data.count; | |
333 | for (i = 0; i < extra->nr; i++) { | |
334 | strbuf_addstr(out, oid_to_hex(extra->oid + i)); | |
335 | strbuf_addch(out, '\n'); | |
336 | data.count++; | |
337 | } | |
338 | return data.count; | |
339 | } | |
340 | ||
/*
 * Append all current shallow grafts (plus any "extra" ids) to "out",
 * one per line, or as pkt-lines when use_pack_protocol is set.
 * Returns the number of entries written.
 */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}
346 | ||
347 | const char *setup_temporary_shallow(const struct oid_array *extra) | |
348 | { | |
349 | struct tempfile *temp; | |
350 | struct strbuf sb = STRBUF_INIT; | |
351 | ||
352 | if (write_shallow_commits(&sb, 0, extra)) { | |
353 | temp = xmks_tempfile(git_path("shallow_XXXXXX")); | |
354 | ||
355 | if (write_in_full(temp->fd, sb.buf, sb.len) < 0 || | |
356 | close_tempfile_gently(temp) < 0) | |
357 | die_errno("failed to write to %s", | |
358 | get_tempfile_path(temp)); | |
359 | strbuf_release(&sb); | |
360 | return get_tempfile_path(temp); | |
361 | } | |
362 | /* | |
363 | * is_repository_shallow() sees empty string as "no shallow | |
364 | * file". | |
365 | */ | |
366 | return ""; | |
367 | } | |
368 | ||
369 | void setup_alternate_shallow(struct shallow_lock *shallow_lock, | |
370 | const char **alternate_shallow_file, | |
371 | const struct oid_array *extra) | |
372 | { | |
373 | struct strbuf sb = STRBUF_INIT; | |
374 | int fd; | |
375 | ||
376 | fd = hold_lock_file_for_update(&shallow_lock->lock, | |
377 | git_path_shallow(the_repository), | |
378 | LOCK_DIE_ON_ERROR); | |
379 | check_shallow_file_for_update(the_repository); | |
380 | if (write_shallow_commits(&sb, 0, extra)) { | |
381 | if (write_in_full(fd, sb.buf, sb.len) < 0) | |
382 | die_errno("failed to write to %s", | |
383 | get_lock_file_path(&shallow_lock->lock)); | |
384 | *alternate_shallow_file = get_lock_file_path(&shallow_lock->lock); | |
385 | } else | |
386 | /* | |
387 | * is_repository_shallow() sees empty string as "no | |
388 | * shallow file". | |
389 | */ | |
390 | *alternate_shallow_file = ""; | |
391 | strbuf_release(&sb); | |
392 | } | |
393 | ||
394 | static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb) | |
395 | { | |
396 | int fd = *(int *)cb; | |
397 | if (graft->nr_parent == -1) | |
398 | packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid)); | |
399 | return 0; | |
400 | } | |
401 | ||
402 | void advertise_shallow_grafts(int fd) | |
403 | { | |
404 | if (!is_repository_shallow(the_repository)) | |
405 | return; | |
406 | for_each_commit_graft(advertise_shallow_grafts_cb, &fd); | |
407 | } | |
408 | ||
/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
	struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	if (options & PRUNE_SHOW_ONLY) {
		/* Dry run: report what would happen, change nothing. */
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock.lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock.lock));
		commit_shallow_file(the_repository, &shallow_lock);
	} else {
		/*
		 * No entries survived: remove the shallow file itself
		 * and drop the now-unneeded lockfile.
		 */
		unlink(git_path_shallow(the_repository));
		rollback_shallow_file(the_repository, &shallow_lock);
	}
	strbuf_release(&sb);
}
446 | ||
/* Trace key: shallow-machinery debug output (GIT_TRACE_SHALLOW). */
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);
448 | ||
449 | /* | |
450 | * Step 1, split sender shallow commits into "ours" and "theirs" | |
451 | * Step 2, clean "ours" based on .git/shallow | |
452 | */ | |
453 | void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa) | |
454 | { | |
455 | int i; | |
456 | trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n"); | |
457 | memset(info, 0, sizeof(*info)); | |
458 | info->shallow = sa; | |
459 | if (!sa) | |
460 | return; | |
461 | ALLOC_ARRAY(info->ours, sa->nr); | |
462 | ALLOC_ARRAY(info->theirs, sa->nr); | |
463 | for (i = 0; i < sa->nr; i++) { | |
464 | if (has_object_file(sa->oid + i)) { | |
465 | struct commit_graft *graft; | |
466 | graft = lookup_commit_graft(the_repository, | |
467 | &sa->oid[i]); | |
468 | if (graft && graft->nr_parent < 0) | |
469 | continue; | |
470 | info->ours[info->nr_ours++] = i; | |
471 | } else | |
472 | info->theirs[info->nr_theirs++] = i; | |
473 | } | |
474 | } | |
475 | ||
476 | void clear_shallow_info(struct shallow_info *info) | |
477 | { | |
478 | free(info->ours); | |
479 | free(info->theirs); | |
480 | } | |
481 | ||
482 | /* Step 4, remove non-existent ones in "theirs" after getting the pack */ | |
483 | ||
484 | void remove_nonexistent_theirs_shallow(struct shallow_info *info) | |
485 | { | |
486 | struct object_id *oid = info->shallow->oid; | |
487 | int i, dst; | |
488 | trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n"); | |
489 | for (i = dst = 0; i < info->nr_theirs; i++) { | |
490 | if (i != dst) | |
491 | info->theirs[dst] = info->theirs[i]; | |
492 | if (has_object_file(oid + info->theirs[i])) | |
493 | dst++; | |
494 | } | |
495 | info->nr_theirs = dst; | |
496 | } | |
497 | ||
/* Per-commit slab: bitmap of which refs can reach the commit. */
define_commit_slab(ref_bitmap, uint32_t *);

/* Size in bytes of each allocation pool the ref bitmaps live in. */
#define POOL_SIZE (512 * 1024)

/* State for paint_down(): ref bitmaps plus their backing pool allocator. */
struct paint_info {
	struct ref_bitmap ref_bitmap;	/* commit -> bitmap of reaching refs */
	unsigned nr_bits;		/* bits per bitmap (= number of refs) */
	char **pools;			/* every pool block ever allocated */
	char *free, *end;		/* unused tail of the current pool */
	unsigned pool_count;		/* number of entries in "pools" */
};
509 | ||
510 | static uint32_t *paint_alloc(struct paint_info *info) | |
511 | { | |
512 | unsigned nr = DIV_ROUND_UP(info->nr_bits, 32); | |
513 | unsigned size = nr * sizeof(uint32_t); | |
514 | void *p; | |
515 | if (!info->pool_count || size > info->end - info->free) { | |
516 | if (size > POOL_SIZE) | |
517 | BUG("pool size too small for %d in paint_alloc()", | |
518 | size); | |
519 | info->pool_count++; | |
520 | REALLOC_ARRAY(info->pools, info->pool_count); | |
521 | info->free = xmalloc(POOL_SIZE); | |
522 | info->pools[info->pool_count - 1] = info->free; | |
523 | info->end = info->free + POOL_SIZE; | |
524 | } | |
525 | p = info->free; | |
526 | info->free += size; | |
527 | return p; | |
528 | } | |
529 | ||
/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	/*
	 * Bitmap with only bit "id" set; shared (not copied) by every
	 * commit reached by this ref alone.
	 */
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			/*
			 * Commit already painted by other refs: OR our
			 * bit in, allocating a new bitmap only when that
			 * actually changes the set.
			 */
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	/* Clear the SEEN marks so the next paint_down() starts fresh. */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}
601 | ||
602 | static int mark_uninteresting(const char *refname, const struct object_id *oid, | |
603 | int flags, void *cb_data) | |
604 | { | |
605 | struct commit *commit = lookup_commit_reference_gently(the_repository, | |
606 | oid, 1); | |
607 | if (!commit) | |
608 | return 0; | |
609 | commit->object.flags |= UNINTERESTING; | |
610 | mark_parents_uninteresting(commit); | |
611 | return 0; | |
612 | } | |
613 | ||
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	/* Gather all candidate shallow commits, "ours" then "theirs". */
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		/* Flags paint_down() relies on must start clean. */
		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	/* Paint each ref's bit down through the history it reaches. */
	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	/* Release the bitmap slab and every pool it was carved from. */
	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}
707 | ||
/* Growable array of commits, filled by the add_ref() callback. */
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};
712 | ||
713 | static int add_ref(const char *refname, const struct object_id *oid, | |
714 | int flags, void *cb_data) | |
715 | { | |
716 | struct commit_array *ca = cb_data; | |
717 | ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc); | |
718 | ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository, | |
719 | oid, 1); | |
720 | if (ca->commits[ca->nr]) | |
721 | ca->nr++; | |
722 | return 0; | |
723 | } | |
724 | ||
/*
 * For each ref index i (0 <= i < nr) whose bit is set in "bitmap",
 * increment ref_status[i].  No-op when the caller did not ask for
 * per-ref status (ref_status == NULL).
 *
 * Fix: the loop index was "unsigned int" while "nr" is signed int; a
 * signed/unsigned comparison promotes nr to unsigned, which would
 * loop (almost) forever should nr ever be negative.  Use a matching
 * signed index.
 */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	int i;
	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}
734 | ||
/*
 * Step 7, reachability test on "ours" at commit level
 *
 * Using the per-commit ref bitmaps built by paint_down(), compact
 * "theirs" down to entries some ref actually needs, and compact
 * "ours" down to entries some ref needs that are NOT already
 * reachable from the current refs.  ref_status (if non-NULL) is
 * filled with, per ref, how many surviving entries it needs.
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		/* Shift surviving entries down over dropped ones. */
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;	/* no ref reaches it: drop */
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				/* Needed by at least one ref: keep. */
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	/* Collect the commits all current refs point at. */
	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}
795 | ||
796 | /* (Delayed) step 7, reachability test at commit level */ | |
797 | int delayed_reachability_test(struct shallow_info *si, int c) | |
798 | { | |
799 | if (si->need_reachability_test[c]) { | |
800 | struct commit *commit = lookup_commit(the_repository, | |
801 | &si->shallow->oid[c]); | |
802 | ||
803 | if (!si->commits) { | |
804 | struct commit_array ca; | |
805 | ||
806 | memset(&ca, 0, sizeof(ca)); | |
807 | head_ref(add_ref, &ca); | |
808 | for_each_ref(add_ref, &ca); | |
809 | si->commits = ca.commits; | |
810 | si->nr_commits = ca.nr; | |
811 | } | |
812 | ||
813 | si->reachable[c] = in_merge_bases_many(commit, | |
814 | si->nr_commits, | |
815 | si->commits); | |
816 | si->need_reachability_test[c] = 0; | |
817 | } | |
818 | return si->reachable[c]; | |
819 | } |