Commit | Line | Data |
---|---|---|
072bf432 JS |
1 | #include "cache.h" |
2 | #include "refs.h" | |
cbd53a21 | 3 | #include "object-store.h" |
072bf432 | 4 | #include "cache-tree.h" |
b543bb1c JS |
5 | #include "mergesort.h" |
6 | #include "diff.h" | |
7 | #include "diffcore.h" | |
09002f1b | 8 | #include "tag.h" |
f5dd754c | 9 | #include "blame.h" |
14ba97f8 | 10 | #include "alloc.h" |
f5dd754c JS |
11 | |
12 | void blame_origin_decref(struct blame_origin *o) | |
13 | { | |
14 | if (o && --o->refcnt <= 0) { | |
15 | struct blame_origin *p, *l = NULL; | |
16 | if (o->previous) | |
17 | blame_origin_decref(o->previous); | |
18 | free(o->file.ptr); | |
19 | /* Should be present exactly once in commit chain */ | |
20 | for (p = o->commit->util; p; l = p, p = p->next) { | |
21 | if (p == o) { | |
22 | if (l) | |
23 | l->next = p->next; | |
24 | else | |
25 | o->commit->util = p->next; | |
26 | free(o); | |
27 | return; | |
28 | } | |
29 | } | |
30 | die("internal error in blame_origin_decref"); | |
31 | } | |
32 | } | |
33 | ||
34 | /* | |
35 | * Given a commit and a path in it, create a new origin structure. | |
36 | * The callers that add blame to the scoreboard should use | |
37 | * get_origin() to obtain a shared, refcounted copy instead of calling | |
38 | * this function directly. | |
39 | */ | |
072bf432 | 40 | static struct blame_origin *make_origin(struct commit *commit, const char *path) |
f5dd754c JS |
41 | { |
42 | struct blame_origin *o; | |
43 | FLEX_ALLOC_STR(o, path, path); | |
44 | o->commit = commit; | |
45 | o->refcnt = 1; | |
46 | o->next = commit->util; | |
47 | commit->util = o; | |
48 | return o; | |
49 | } | |
50 | ||
51 | /* | |
52 | * Locate an existing origin or create a new one. | |
53 | * This moves the origin to front position in the commit util list. | |
54 | */ | |
09002f1b | 55 | static struct blame_origin *get_origin(struct commit *commit, const char *path) |
f5dd754c JS |
56 | { |
57 | struct blame_origin *o, *l; | |
58 | ||
59 | for (o = commit->util, l = NULL; o; l = o, o = o->next) { | |
60 | if (!strcmp(o->path, path)) { | |
61 | /* bump to front */ | |
62 | if (l) { | |
63 | l->next = o->next; | |
64 | o->next = commit->util; | |
65 | commit->util = o; | |
66 | } | |
67 | return blame_origin_incref(o); | |
68 | } | |
69 | } | |
70 | return make_origin(commit, path); | |
71 | } | |
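For example (an illustrative path, not from the source), two consecutive get_origin(commit, "Makefile") calls hand out the same origin: the first creates it via make_origin() with refcnt == 1, the second finds it, bumps refcnt to 2 and moves it to the front of commit->util. Each holder must eventually call blame_origin_decref(); the last decref unlinks the origin from the commit's chain and frees it.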
072bf432 JS |
72 | |
73 | ||
74 | ||
75 | static void verify_working_tree_path(struct commit *work_tree, const char *path) | |
76 | { | |
77 | struct commit_list *parents; | |
78 | int pos; | |
79 | ||
80 | for (parents = work_tree->parents; parents; parents = parents->next) { | |
81 | const struct object_id *commit_oid = &parents->item->object.oid; | |
82 | struct object_id blob_oid; | |
83 | unsigned mode; | |
84 | ||
916bc35b | 85 | if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) && |
0df8e965 | 86 | oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB) |
072bf432 JS |
87 | return; |
88 | } | |
89 | ||
90 | pos = cache_name_pos(path, strlen(path)); | |
91 | if (pos >= 0) | |
92 | ; /* path is in the index */ | |
93 | else if (-1 - pos < active_nr && | |
94 | !strcmp(active_cache[-1 - pos]->name, path)) | |
95 | ; /* path is in the index, unmerged */ | |
96 | else | |
97 | die("no such path '%s' in HEAD", path); | |
98 | } | |
99 | ||
100 | static struct commit_list **append_parent(struct commit_list **tail, const struct object_id *oid) | |
101 | { | |
102 | struct commit *parent; | |
103 | ||
583c6a22 | 104 | parent = lookup_commit_reference(oid); |
072bf432 JS |
105 | if (!parent) |
106 | die("no such commit %s", oid_to_hex(oid)); | |
107 | return &commit_list_insert(parent, tail)->next; | |
108 | } | |
109 | ||
110 | static void append_merge_parents(struct commit_list **tail) | |
111 | { | |
112 | int merge_head; | |
113 | struct strbuf line = STRBUF_INIT; | |
114 | ||
102de880 | 115 | merge_head = open(git_path_merge_head(the_repository), O_RDONLY); |
072bf432 JS |
116 | if (merge_head < 0) { |
117 | if (errno == ENOENT) | |
118 | return; | |
102de880 SB |
119 | die("cannot open '%s' for reading", |
120 | git_path_merge_head(the_repository)); | |
072bf432 JS |
121 | } |
122 | ||
123 | while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) { | |
124 | struct object_id oid; | |
125 | if (line.len < GIT_SHA1_HEXSZ || get_oid_hex(line.buf, &oid)) | |
102de880 SB |
126 | die("unknown line in '%s': %s", |
127 | git_path_merge_head(the_repository), line.buf); | |
072bf432 JS |
128 | tail = append_parent(tail, &oid); |
129 | } | |
130 | close(merge_head); | |
131 | strbuf_release(&line); | |
132 | } | |
133 | ||
134 | /* | |
135 | * This isn't as simple as passing sb->buf and sb->len, because we | |
136 | * want to transfer ownership of the buffer to the commit (so we | |
137 | * must use detach). | |
138 | */ | |
139 | static void set_commit_buffer_from_strbuf(struct commit *c, struct strbuf *sb) | |
140 | { | |
141 | size_t len; | |
142 | void *buf = strbuf_detach(sb, &len); | |
143 | set_commit_buffer(c, buf, len); | |
144 | } | |
145 | ||
146 | /* | |
147 | * Prepare a dummy commit that represents the work tree (or staged) item. | |
148 | * Note that annotating a work tree item never works in reverse. | |
149 | */ | |
09002f1b JS |
150 | static struct commit *fake_working_tree_commit(struct diff_options *opt, |
151 | const char *path, | |
152 | const char *contents_from) | |
072bf432 JS |
153 | { |
154 | struct commit *commit; | |
155 | struct blame_origin *origin; | |
156 | struct commit_list **parent_tail, *parent; | |
157 | struct object_id head_oid; | |
158 | struct strbuf buf = STRBUF_INIT; | |
159 | const char *ident; | |
160 | time_t now; | |
161 | int size, len; | |
162 | struct cache_entry *ce; | |
163 | unsigned mode; | |
164 | struct strbuf msg = STRBUF_INIT; | |
165 | ||
166 | read_cache(); | |
167 | time(&now); | |
8ba0e5ec | 168 | commit = alloc_commit_node(the_repository); |
072bf432 JS |
169 | commit->object.parsed = 1; |
170 | commit->date = now; | |
171 | parent_tail = &commit->parents; | |
172 | ||
49e61479 | 173 | if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL)) |
072bf432 JS |
174 | die("no such ref: HEAD"); |
175 | ||
176 | parent_tail = append_parent(parent_tail, &head_oid); | |
177 | append_merge_parents(parent_tail); | |
178 | verify_working_tree_path(commit, path); | |
179 | ||
180 | origin = make_origin(commit, path); | |
181 | ||
182 | ident = fmt_ident("Not Committed Yet", "not.committed.yet", NULL, 0); | |
183 | strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n"); | |
184 | for (parent = commit->parents; parent; parent = parent->next) | |
185 | strbuf_addf(&msg, "parent %s\n", | |
186 | oid_to_hex(&parent->item->object.oid)); | |
187 | strbuf_addf(&msg, | |
188 | "author %s\n" | |
189 | "committer %s\n\n" | |
190 | "Version of %s from %s\n", | |
191 | ident, ident, path, | |
192 | (!contents_from ? path : | |
193 | (!strcmp(contents_from, "-") ? "standard input" : contents_from))); | |
194 | set_commit_buffer_from_strbuf(commit, &msg); | |
195 | ||
196 | if (!contents_from || strcmp("-", contents_from)) { | |
197 | struct stat st; | |
198 | const char *read_from; | |
199 | char *buf_ptr; | |
200 | unsigned long buf_len; | |
201 | ||
202 | if (contents_from) { | |
203 | if (stat(contents_from, &st) < 0) | |
204 | die_errno("Cannot stat '%s'", contents_from); | |
205 | read_from = contents_from; | |
206 | } | |
207 | else { | |
208 | if (lstat(path, &st) < 0) | |
209 | die_errno("Cannot lstat '%s'", path); | |
210 | read_from = path; | |
211 | } | |
212 | mode = canon_mode(st.st_mode); | |
213 | ||
214 | switch (st.st_mode & S_IFMT) { | |
215 | case S_IFREG: | |
0d1e0e78 | 216 | if (opt->flags.allow_textconv && |
072bf432 JS |
217 | textconv_object(read_from, mode, &null_oid, 0, &buf_ptr, &buf_len)) |
218 | strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1); | |
219 | else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size) | |
220 | die_errno("cannot open or read '%s'", read_from); | |
221 | break; | |
222 | case S_IFLNK: | |
223 | if (strbuf_readlink(&buf, read_from, st.st_size) < 0) | |
224 | die_errno("cannot readlink '%s'", read_from); | |
225 | break; | |
226 | default: | |
227 | die("unsupported file type %s", read_from); | |
228 | } | |
229 | } | |
230 | else { | |
231 | /* Reading from stdin */ | |
232 | mode = 0; | |
233 | if (strbuf_read(&buf, 0, 0) < 0) | |
234 | die_errno("failed to read from stdin"); | |
235 | } | |
82b474e0 | 236 | convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0); |
072bf432 JS |
237 | origin->file.ptr = buf.buf; |
238 | origin->file.size = buf.len; | |
829e5c3b | 239 | pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid); |
072bf432 JS |
240 | |
241 | /* | |
242 | * Read the current index, replace the path entry with | |
243 | * origin->blob_sha1 without mucking with its mode or type | |
244 | * bits; we are not going to write this index out -- we just | |
245 | * want to run "diff-index --cached". | |
246 | */ | |
247 | discard_cache(); | |
248 | read_cache(); | |
249 | ||
250 | len = strlen(path); | |
251 | if (!mode) { | |
252 | int pos = cache_name_pos(path, len); | |
253 | if (0 <= pos) | |
254 | mode = active_cache[pos]->ce_mode; | |
255 | else | |
256 | /* Let's not bother reading from HEAD tree */ | |
257 | mode = S_IFREG | 0644; | |
258 | } | |
259 | size = cache_entry_size(len); | |
260 | ce = xcalloc(1, size); | |
261 | oidcpy(&ce->oid, &origin->blob_oid); | |
262 | memcpy(ce->name, path, len); | |
263 | ce->ce_flags = create_ce_flags(0); | |
264 | ce->ce_namelen = len; | |
265 | ce->ce_mode = create_ce_mode(mode); | |
266 | add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE); | |
267 | ||
268 | cache_tree_invalidate_path(&the_index, path); | |
269 | ||
270 | return commit; | |
271 | } | |
b543bb1c JS |
272 | |
273 | ||
274 | ||
275 | static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b, | |
276 | xdl_emit_hunk_consume_func_t hunk_func, void *cb_data, int xdl_opts) | |
277 | { | |
278 | xpparam_t xpp = {0}; | |
279 | xdemitconf_t xecfg = {0}; | |
280 | xdemitcb_t ecb = {NULL}; | |
281 | ||
282 | xpp.flags = xdl_opts; | |
283 | xecfg.hunk_func = hunk_func; | |
284 | ecb.priv = cb_data; | |
285 | return xdi_diff(file_a, file_b, &xpp, &xecfg, &ecb); | |
286 | } | |
287 | ||
288 | /* | |
289 | * Given an origin, prepare mmfile_t structure to be used by the | |
290 | * diff machinery | |
291 | */ | |
292 | static void fill_origin_blob(struct diff_options *opt, | |
293 | struct blame_origin *o, mmfile_t *file, int *num_read_blob) | |
294 | { | |
295 | if (!o->file.ptr) { | |
296 | enum object_type type; | |
297 | unsigned long file_size; | |
298 | ||
299 | (*num_read_blob)++; | |
0d1e0e78 | 300 | if (opt->flags.allow_textconv && |
b543bb1c JS |
301 | textconv_object(o->path, o->mode, &o->blob_oid, 1, &file->ptr, &file_size)) |
302 | ; | |
303 | else | |
b4f5aca4 | 304 | file->ptr = read_object_file(&o->blob_oid, &type, |
305 | &file_size); | |
b543bb1c JS |
306 | file->size = file_size; |
307 | ||
308 | if (!file->ptr) | |
309 | die("Cannot read blob %s for path %s", | |
310 | oid_to_hex(&o->blob_oid), | |
311 | o->path); | |
312 | o->file = *file; | |
313 | } | |
314 | else | |
315 | *file = o->file; | |
316 | } | |
317 | ||
318 | static void drop_origin_blob(struct blame_origin *o) | |
319 | { | |
320 | if (o->file.ptr) { | |
e140f7af | 321 | FREE_AND_NULL(o->file.ptr); |
b543bb1c JS |
322 | } |
323 | } | |
324 | ||
325 | /* | |
326 | * Any merge of blames happens on lists of blames that arrived via | |
327 | * different parents in a single suspect. In this case, we want to | |
328 | * sort according to the suspect line numbers as opposed to the final | |
329 | * image line numbers. The function body is somewhat longish because | |
330 | * it avoids unnecessary writes. | |
331 | */ | |
332 | ||
333 | static struct blame_entry *blame_merge(struct blame_entry *list1, | |
334 | struct blame_entry *list2) | |
335 | { | |
336 | struct blame_entry *p1 = list1, *p2 = list2, | |
337 | **tail = &list1; | |
338 | ||
339 | if (!p1) | |
340 | return p2; | |
341 | if (!p2) | |
342 | return p1; | |
343 | ||
344 | if (p1->s_lno <= p2->s_lno) { | |
345 | do { | |
346 | tail = &p1->next; | |
347 | if ((p1 = *tail) == NULL) { | |
348 | *tail = p2; | |
349 | return list1; | |
350 | } | |
351 | } while (p1->s_lno <= p2->s_lno); | |
352 | } | |
353 | for (;;) { | |
354 | *tail = p2; | |
355 | do { | |
356 | tail = &p2->next; | |
357 | if ((p2 = *tail) == NULL) { | |
358 | *tail = p1; | |
359 | return list1; | |
360 | } | |
361 | } while (p1->s_lno > p2->s_lno); | |
362 | *tail = p1; | |
363 | do { | |
364 | tail = &p1->next; | |
365 | if ((p1 = *tail) == NULL) { | |
366 | *tail = p2; | |
367 | return list1; | |
368 | } | |
369 | } while (p1->s_lno <= p2->s_lno); | |
370 | } | |
371 | } | |
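The loop structure above is a merge of two lists that are already sorted by s_lno, written to avoid unnecessary pointer writes. The same technique in a minimal, self-contained form (hypothetical node type, not part of blame.c; this naive version rewrites links that blame_merge() deliberately leaves alone):

#include <stdio.h>

struct node { int s_lno; struct node *next; };

static struct node *merge(struct node *a, struct node *b)
{
	struct node head = { 0, NULL }, *tail = &head;

	while (a && b) {
		/* take the smaller head; ties keep list "a" first, as in blame_merge() */
		struct node **min = (a->s_lno <= b->s_lno) ? &a : &b;
		tail->next = *min;
		tail = *min;
		*min = (*min)->next;
	}
	tail->next = a ? a : b;	/* append whichever list still has entries */
	return head.next;
}

int main(void)
{
	struct node c = { 9, NULL }, b = { 4, &c }, a = { 1, &b };
	struct node z = { 7, NULL }, y = { 3, &z };

	for (struct node *p = merge(&a, &y); p; p = p->next)
		printf("%d ", p->s_lno);	/* prints: 1 3 4 7 9 */
	printf("\n");
	return 0;
}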
372 | ||
373 | static void *get_next_blame(const void *p) | |
374 | { | |
375 | return ((struct blame_entry *)p)->next; | |
376 | } | |
377 | ||
378 | static void set_next_blame(void *p1, void *p2) | |
379 | { | |
380 | ((struct blame_entry *)p1)->next = p2; | |
381 | } | |
382 | ||
383 | /* | |
384 | * Final image line numbers are all different, so we don't need a | |
385 | * three-way comparison here. | |
386 | */ | |
387 | ||
388 | static int compare_blame_final(const void *p1, const void *p2) | |
389 | { | |
390 | return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno | |
391 | ? 1 : -1; | |
392 | } | |
393 | ||
394 | static int compare_blame_suspect(const void *p1, const void *p2) | |
395 | { | |
396 | const struct blame_entry *s1 = p1, *s2 = p2; | |
397 | /* | |
398 | * To allow for collating suspects, we sort according to the | |
399 | * respective pointer value as the primary sorting criterion. | |
400 | * The actual relation is pretty unimportant as long as it | |
401 | * establishes a total order. Comparing as integers gives us | |
402 | * that. | |
403 | */ | |
404 | if (s1->suspect != s2->suspect) | |
405 | return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1; | |
406 | if (s1->s_lno == s2->s_lno) | |
407 | return 0; | |
408 | return s1->s_lno > s2->s_lno ? 1 : -1; | |
409 | } | |
410 | ||
411 | void blame_sort_final(struct blame_scoreboard *sb) | |
412 | { | |
413 | sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame, | |
414 | compare_blame_final); | |
415 | } | |
416 | ||
09002f1b JS |
417 | static int compare_commits_by_reverse_commit_date(const void *a, |
418 | const void *b, | |
419 | void *c) | |
420 | { | |
421 | return -compare_commits_by_commit_date(a, b, c); | |
422 | } | |
423 | ||
b543bb1c JS |
424 | /* |
425 | * For debugging -- origin is refcounted, and this asserts that | |
426 | * we do not underflow. | |
427 | */ | |
428 | static void sanity_check_refcnt(struct blame_scoreboard *sb) | |
429 | { | |
430 | int baa = 0; | |
431 | struct blame_entry *ent; | |
432 | ||
433 | for (ent = sb->ent; ent; ent = ent->next) { | |
434 | /* Nobody should have zero or negative refcnt */ | |
435 | if (ent->suspect->refcnt <= 0) { | |
436 | fprintf(stderr, "%s in %s has negative refcnt %d\n", | |
437 | ent->suspect->path, | |
438 | oid_to_hex(&ent->suspect->commit->object.oid), | |
439 | ent->suspect->refcnt); | |
440 | baa = 1; | |
441 | } | |
442 | } | |
443 | if (baa) | |
444 | sb->on_sanity_fail(sb, baa); | |
445 | } | |
446 | ||
447 | /* | |
448 | * If two blame entries that are next to each other came from | |
449 | * contiguous lines in the same origin (i.e. <commit, path> pair), | |
450 | * merge them together. | |
451 | */ | |
452 | void blame_coalesce(struct blame_scoreboard *sb) | |
453 | { | |
454 | struct blame_entry *ent, *next; | |
455 | ||
456 | for (ent = sb->ent; ent && (next = ent->next); ent = next) { | |
457 | if (ent->suspect == next->suspect && | |
458 | ent->s_lno + ent->num_lines == next->s_lno) { | |
459 | ent->num_lines += next->num_lines; | |
460 | ent->next = next->next; | |
461 | blame_origin_decref(next->suspect); | |
462 | free(next); | |
463 | ent->score = 0; | |
464 | next = ent; /* again */ | |
465 | } | |
466 | } | |
467 | ||
468 | if (sb->debug) /* sanity */ | |
469 | sanity_check_refcnt(sb); | |
470 | } | |
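For example, two adjacent entries that both blame the same origin, the first covering s_lno 10-14 (num_lines 5) and the second s_lno 15-19, collapse into a single 10-line entry; the cached score is reset to 0 because it no longer describes the merged range.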
471 | ||
472 | /* | |
473 | * Merge the given sorted list of blames into a preexisting origin. | |
474 | * If there were no previous blames to that commit, it is entered into | |
475 | * the commit priority queue of the score board. | |
476 | */ | |
477 | ||
478 | static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porigin, | |
479 | struct blame_entry *sorted) | |
480 | { | |
481 | if (porigin->suspects) | |
482 | porigin->suspects = blame_merge(porigin->suspects, sorted); | |
483 | else { | |
484 | struct blame_origin *o; | |
485 | for (o = porigin->commit->util; o; o = o->next) { | |
486 | if (o->suspects) { | |
487 | porigin->suspects = sorted; | |
488 | return; | |
489 | } | |
490 | } | |
491 | porigin->suspects = sorted; | |
492 | prio_queue_put(&sb->commits, porigin->commit); | |
493 | } | |
494 | } | |
495 | ||
09002f1b JS |
496 | /* |
497 | * Fill the blob_oid field of an origin if it has not been filled yet, so | |
498 | * that a later call to fill_origin_blob() can use it to locate the data. | |
499 | * The blob_oid of an origin is also used to pass the blame for the entire | |
500 | * file to the parent, to detect the case where a child's blob is identical | |
501 | * to its parent's. | |
502 | * | |
503 | * This also fills origin->mode for the corresponding tree path. | |
504 | */ | |
505 | static int fill_blob_sha1_and_mode(struct blame_origin *origin) | |
506 | { | |
507 | if (!is_null_oid(&origin->blob_oid)) | |
508 | return 0; | |
916bc35b | 509 | if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode)) |
09002f1b | 510 | goto error_out; |
0df8e965 | 511 | if (oid_object_info(the_repository, &origin->blob_oid, NULL) != OBJ_BLOB) |
09002f1b JS |
512 | goto error_out; |
513 | return 0; | |
514 | error_out: | |
515 | oidclr(&origin->blob_oid); | |
516 | origin->mode = S_IFINVALID; | |
517 | return -1; | |
518 | } | |
519 | ||
b543bb1c JS |
520 | /* |
521 | * We have an origin -- check if the same path exists in the | |
522 | * parent and return an origin structure to represent it. | |
523 | */ | |
524 | static struct blame_origin *find_origin(struct commit *parent, | |
525 | struct blame_origin *origin) | |
526 | { | |
527 | struct blame_origin *porigin; | |
528 | struct diff_options diff_opts; | |
529 | const char *paths[2]; | |
530 | ||
531 | /* First check any existing origins */ | |
532 | for (porigin = parent->util; porigin; porigin = porigin->next) | |
533 | if (!strcmp(porigin->path, origin->path)) { | |
534 | /* | |
535 | * The same path between origin and its parent | |
536 | * without renaming -- the most common case. | |
537 | */ | |
538 | return blame_origin_incref(porigin); | |
539 | } | |
540 | ||
541 | /* See if the origin->path is different between parent | |
542 | * and origin first. Most of the time they are the | |
543 | * same and diff-tree is fairly efficient about this. | |
544 | */ | |
545 | diff_setup(&diff_opts); | |
0d1e0e78 | 546 | diff_opts.flags.recursive = 1; |
b543bb1c JS |
547 | diff_opts.detect_rename = 0; |
548 | diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; | |
549 | paths[0] = origin->path; | |
550 | paths[1] = NULL; | |
551 | ||
552 | parse_pathspec(&diff_opts.pathspec, | |
553 | PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL, | |
554 | PATHSPEC_LITERAL_PATH, "", paths); | |
555 | diff_setup_done(&diff_opts); | |
556 | ||
557 | if (is_null_oid(&origin->commit->object.oid)) | |
583c6a22 | 558 | do_diff_cache(&parent->tree->object.oid, &diff_opts); |
b543bb1c | 559 | else |
a6f38c10 JH |
560 | diff_tree_oid(&parent->tree->object.oid, |
561 | &origin->commit->tree->object.oid, | |
562 | "", &diff_opts); | |
b543bb1c JS |
563 | diffcore_std(&diff_opts); |
564 | ||
565 | if (!diff_queued_diff.nr) { | |
566 | /* The path is the same as parent */ | |
567 | porigin = get_origin(parent, origin->path); | |
568 | oidcpy(&porigin->blob_oid, &origin->blob_oid); | |
569 | porigin->mode = origin->mode; | |
570 | } else { | |
571 | /* | |
572 | * Since origin->path is a pathspec, if the parent | |
573 | * commit had it as a directory, we will see a whole | |
574 | * bunch of deletions of files in the directory that we | |
575 | * do not care about. | |
576 | */ | |
577 | int i; | |
578 | struct diff_filepair *p = NULL; | |
579 | for (i = 0; i < diff_queued_diff.nr; i++) { | |
580 | const char *name; | |
581 | p = diff_queued_diff.queue[i]; | |
582 | name = p->one->path ? p->one->path : p->two->path; | |
583 | if (!strcmp(name, origin->path)) | |
584 | break; | |
585 | } | |
586 | if (!p) | |
587 | die("internal error in blame::find_origin"); | |
588 | switch (p->status) { | |
589 | default: | |
590 | die("internal error in blame::find_origin (%c)", | |
591 | p->status); | |
592 | case 'M': | |
593 | porigin = get_origin(parent, origin->path); | |
594 | oidcpy(&porigin->blob_oid, &p->one->oid); | |
595 | porigin->mode = p->one->mode; | |
596 | break; | |
597 | case 'A': | |
598 | case 'T': | |
599 | /* Did not exist in parent, or type changed */ | |
600 | break; | |
601 | } | |
602 | } | |
603 | diff_flush(&diff_opts); | |
604 | clear_pathspec(&diff_opts.pathspec); | |
605 | return porigin; | |
606 | } | |
607 | ||
608 | /* | |
609 | * We have an origin -- find the path that corresponds to it in its | |
610 | * parent and return an origin structure to represent it. | |
611 | */ | |
612 | static struct blame_origin *find_rename(struct commit *parent, | |
613 | struct blame_origin *origin) | |
614 | { | |
615 | struct blame_origin *porigin = NULL; | |
616 | struct diff_options diff_opts; | |
617 | int i; | |
618 | ||
619 | diff_setup(&diff_opts); | |
0d1e0e78 | 620 | diff_opts.flags.recursive = 1; |
b543bb1c JS |
621 | diff_opts.detect_rename = DIFF_DETECT_RENAME; |
622 | diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; | |
623 | diff_opts.single_follow = origin->path; | |
624 | diff_setup_done(&diff_opts); | |
625 | ||
626 | if (is_null_oid(&origin->commit->object.oid)) | |
583c6a22 | 627 | do_diff_cache(&parent->tree->object.oid, &diff_opts); |
b543bb1c | 628 | else |
a6f38c10 JH |
629 | diff_tree_oid(&parent->tree->object.oid, |
630 | &origin->commit->tree->object.oid, | |
631 | "", &diff_opts); | |
b543bb1c JS |
632 | diffcore_std(&diff_opts); |
633 | ||
634 | for (i = 0; i < diff_queued_diff.nr; i++) { | |
635 | struct diff_filepair *p = diff_queued_diff.queue[i]; | |
636 | if ((p->status == 'R' || p->status == 'C') && | |
637 | !strcmp(p->two->path, origin->path)) { | |
638 | porigin = get_origin(parent, p->one->path); | |
639 | oidcpy(&porigin->blob_oid, &p->one->oid); | |
640 | porigin->mode = p->one->mode; | |
641 | break; | |
642 | } | |
643 | } | |
644 | diff_flush(&diff_opts); | |
645 | clear_pathspec(&diff_opts.pathspec); | |
646 | return porigin; | |
647 | } | |
648 | ||
649 | /* | |
650 | * Append a new blame entry to a given output queue. | |
651 | */ | |
652 | static void add_blame_entry(struct blame_entry ***queue, | |
653 | const struct blame_entry *src) | |
654 | { | |
655 | struct blame_entry *e = xmalloc(sizeof(*e)); | |
656 | memcpy(e, src, sizeof(*e)); | |
657 | blame_origin_incref(e->suspect); | |
658 | ||
659 | e->next = **queue; | |
660 | **queue = e; | |
661 | *queue = &e->next; | |
662 | } | |
663 | ||
664 | /* | |
665 | * src typically is on-stack; we want to copy the information in it to | |
666 | * a malloced blame_entry that gets added to the given queue. The | |
667 | * origin of dst loses a refcnt. | |
668 | */ | |
669 | static void dup_entry(struct blame_entry ***queue, | |
670 | struct blame_entry *dst, struct blame_entry *src) | |
671 | { | |
672 | blame_origin_incref(src->suspect); | |
673 | blame_origin_decref(dst->suspect); | |
674 | memcpy(dst, src, sizeof(*src)); | |
675 | dst->next = **queue; | |
676 | **queue = dst; | |
677 | *queue = &dst->next; | |
678 | } | |
679 | ||
680 | const char *blame_nth_line(struct blame_scoreboard *sb, long lno) | |
681 | { | |
682 | return sb->final_buf + sb->lineno[lno]; | |
683 | } | |
684 | ||
685 | /* | |
686 | * It is known that lines between tlno and same came from the parent, and e | |
687 | * has an overlap with that range. It is also known that the parent's | |
688 | * line plno corresponds to e's line tlno. | |
689 | * | |
690 | * <---- e -----> | |
691 | * <------> | |
692 | * <------------> | |
693 | * <------------> | |
694 | * <------------------> | |
695 | * | |
696 | * Split e into potentially three parts; before this chunk, the chunk | |
697 | * to be blamed for the parent, and after that portion. | |
698 | */ | |
699 | static void split_overlap(struct blame_entry *split, | |
700 | struct blame_entry *e, | |
701 | int tlno, int plno, int same, | |
702 | struct blame_origin *parent) | |
703 | { | |
704 | int chunk_end_lno; | |
705 | memset(split, 0, sizeof(struct blame_entry [3])); | |
706 | ||
707 | if (e->s_lno < tlno) { | |
708 | /* there is a pre-chunk part not blamed on parent */ | |
709 | split[0].suspect = blame_origin_incref(e->suspect); | |
710 | split[0].lno = e->lno; | |
711 | split[0].s_lno = e->s_lno; | |
712 | split[0].num_lines = tlno - e->s_lno; | |
713 | split[1].lno = e->lno + tlno - e->s_lno; | |
714 | split[1].s_lno = plno; | |
715 | } | |
716 | else { | |
717 | split[1].lno = e->lno; | |
718 | split[1].s_lno = plno + (e->s_lno - tlno); | |
719 | } | |
720 | ||
721 | if (same < e->s_lno + e->num_lines) { | |
722 | /* there is a post-chunk part not blamed on parent */ | |
723 | split[2].suspect = blame_origin_incref(e->suspect); | |
724 | split[2].lno = e->lno + (same - e->s_lno); | |
725 | split[2].s_lno = e->s_lno + (same - e->s_lno); | |
726 | split[2].num_lines = e->s_lno + e->num_lines - same; | |
727 | chunk_end_lno = split[2].lno; | |
728 | } | |
729 | else | |
730 | chunk_end_lno = e->lno + e->num_lines; | |
731 | split[1].num_lines = chunk_end_lno - split[1].lno; | |
732 | ||
733 | /* | |
734 | * if it turns out there is nothing to blame the parent for, | |
735 | * forget about the splitting. !split[1].suspect signals this. | |
736 | */ | |
737 | if (split[1].num_lines < 1) | |
738 | return; | |
739 | split[1].suspect = blame_origin_incref(parent); | |
740 | } | |
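A worked example with made-up numbers: if e covers lines 10-19 (lno == s_lno == 10, num_lines == 10) and the diff says that lines tlno == 13 up to same == 17 came from the parent starting at plno == 40, then split[0] keeps lines 10-12 for e's suspect (num_lines 3), split[1] blames lines 13-16 on the parent with s_lno 40 (num_lines 4), and split[2] keeps lines 17-19 for e's suspect (num_lines 3).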
741 | ||
742 | /* | |
743 | * split_overlap() divided an existing blame e into up to three parts | |
744 | * in split. Any assigned blame is moved to the appropriate queue to | |
745 | * reflect the split. | |
746 | */ | |
747 | static void split_blame(struct blame_entry ***blamed, | |
748 | struct blame_entry ***unblamed, | |
749 | struct blame_entry *split, | |
750 | struct blame_entry *e) | |
751 | { | |
752 | if (split[0].suspect && split[2].suspect) { | |
753 | /* The first part (reuse storage for the existing entry e) */ | |
754 | dup_entry(unblamed, e, &split[0]); | |
755 | ||
756 | /* The last part -- me */ | |
757 | add_blame_entry(unblamed, &split[2]); | |
758 | ||
759 | /* ... and the middle part -- parent */ | |
760 | add_blame_entry(blamed, &split[1]); | |
761 | } | |
762 | else if (!split[0].suspect && !split[2].suspect) | |
763 | /* | |
764 | * The parent covers the entire area; reuse storage for | |
765 | * e and replace it with the parent. | |
766 | */ | |
767 | dup_entry(blamed, e, &split[1]); | |
768 | else if (split[0].suspect) { | |
769 | /* me and then parent */ | |
770 | dup_entry(unblamed, e, &split[0]); | |
771 | add_blame_entry(blamed, &split[1]); | |
772 | } | |
773 | else { | |
774 | /* parent and then me */ | |
775 | dup_entry(blamed, e, &split[1]); | |
776 | add_blame_entry(unblamed, &split[2]); | |
777 | } | |
778 | } | |
779 | ||
780 | /* | |
781 | * After splitting the blame, the origins used by the | |
782 | * on-stack blame_entry should lose one refcnt each. | |
783 | */ | |
784 | static void decref_split(struct blame_entry *split) | |
785 | { | |
786 | int i; | |
787 | ||
788 | for (i = 0; i < 3; i++) | |
789 | blame_origin_decref(split[i].suspect); | |
790 | } | |
791 | ||
792 | /* | |
793 | * reverse_blame reverses the list given in head, appending tail. | |
794 | * That allows us to build lists in reverse order, then reverse them | |
795 | * afterwards. This can be faster than building the list in proper | |
796 | * order right away. The reason is that building in proper order | |
797 | * requires writing a link in the _previous_ element, while building | |
798 | * in reverse order just requires placing the list head into the | |
799 | * _current_ element. | |
800 | */ | |
801 | ||
802 | static struct blame_entry *reverse_blame(struct blame_entry *head, | |
803 | struct blame_entry *tail) | |
804 | { | |
805 | while (head) { | |
806 | struct blame_entry *next = head->next; | |
807 | head->next = tail; | |
808 | tail = head; | |
809 | head = next; | |
810 | } | |
811 | return tail; | |
812 | } | |
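For instance, reverse_blame(a->b->c, NULL) relinks the entries to c->b->a, and reverse_blame(a->b, x->y) yields b->a->x->y; the latter form is how blame_chunk() below glues a reversed work list back in front of the entries that still need processing.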
813 | ||
814 | /* | |
815 | * Process one hunk from the patch between the current suspect for | |
816 | * blame_entry e and its parent. This first blames any unfinished | |
817 | * entries before the chunk (which is where target and parent start | |
818 | * differing) on the parent, and then splits blame entries at the | |
819 | * start and at the end of the difference region. Since use of -M and | |
820 | * -C options may lead to overlapping/duplicate source line number | |
821 | * ranges, all we can rely on from sorting/merging is the order of the | |
822 | * first suspect line number. | |
823 | */ | |
824 | static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq, | |
825 | int tlno, int offset, int same, | |
826 | struct blame_origin *parent) | |
827 | { | |
828 | struct blame_entry *e = **srcq; | |
829 | struct blame_entry *samep = NULL, *diffp = NULL; | |
830 | ||
831 | while (e && e->s_lno < tlno) { | |
832 | struct blame_entry *next = e->next; | |
833 | /* | |
834 | * current record starts before differing portion. If | |
835 | * it reaches into it, we need to split it up and | |
836 | * examine the second part separately. | |
837 | */ | |
838 | if (e->s_lno + e->num_lines > tlno) { | |
839 | /* Move second half to a new record */ | |
840 | int len = tlno - e->s_lno; | |
841 | struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry)); | |
842 | n->suspect = e->suspect; | |
843 | n->lno = e->lno + len; | |
844 | n->s_lno = e->s_lno + len; | |
845 | n->num_lines = e->num_lines - len; | |
846 | e->num_lines = len; | |
847 | e->score = 0; | |
848 | /* Push new record to diffp */ | |
849 | n->next = diffp; | |
850 | diffp = n; | |
851 | } else | |
852 | blame_origin_decref(e->suspect); | |
853 | /* Pass blame for everything before the differing | |
854 | * chunk to the parent */ | |
855 | e->suspect = blame_origin_incref(parent); | |
856 | e->s_lno += offset; | |
857 | e->next = samep; | |
858 | samep = e; | |
859 | e = next; | |
860 | } | |
861 | /* | |
862 | * As we don't know how much of a common stretch after this | |
863 | * diff will occur, the currently blamed parts are all that we | |
864 | * can assign to the parent for now. | |
865 | */ | |
866 | ||
867 | if (samep) { | |
868 | **dstq = reverse_blame(samep, **dstq); | |
869 | *dstq = &samep->next; | |
870 | } | |
871 | /* | |
872 | * Prepend the split off portions: everything after e starts | |
873 | * after the blameable portion. | |
874 | */ | |
875 | e = reverse_blame(diffp, e); | |
876 | ||
877 | /* | |
878 | * Now retain records on the target while parts are different | |
879 | * from the parent. | |
880 | */ | |
881 | samep = NULL; | |
882 | diffp = NULL; | |
883 | while (e && e->s_lno < same) { | |
884 | struct blame_entry *next = e->next; | |
885 | ||
886 | /* | |
887 | * If current record extends into sameness, need to split. | |
888 | */ | |
889 | if (e->s_lno + e->num_lines > same) { | |
890 | /* | |
891 | * Move second half to a new record to be | |
892 | * processed by later chunks | |
893 | */ | |
894 | int len = same - e->s_lno; | |
895 | struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry)); | |
896 | n->suspect = blame_origin_incref(e->suspect); | |
897 | n->lno = e->lno + len; | |
898 | n->s_lno = e->s_lno + len; | |
899 | n->num_lines = e->num_lines - len; | |
900 | e->num_lines = len; | |
901 | e->score = 0; | |
902 | /* Push new record to samep */ | |
903 | n->next = samep; | |
904 | samep = n; | |
905 | } | |
906 | e->next = diffp; | |
907 | diffp = e; | |
908 | e = next; | |
909 | } | |
910 | **srcq = reverse_blame(diffp, reverse_blame(samep, e)); | |
911 | /* Move across elements that are in the unblamable portion */ | |
912 | if (diffp) | |
913 | *srcq = &diffp->next; | |
914 | } | |
915 | ||
916 | struct blame_chunk_cb_data { | |
917 | struct blame_origin *parent; | |
918 | long offset; | |
919 | struct blame_entry **dstq; | |
920 | struct blame_entry **srcq; | |
921 | }; | |
922 | ||
923 | /* diff chunks are from parent to target */ | |
924 | static int blame_chunk_cb(long start_a, long count_a, | |
925 | long start_b, long count_b, void *data) | |
926 | { | |
927 | struct blame_chunk_cb_data *d = data; | |
928 | if (start_a - start_b != d->offset) | |
929 | die("internal error in blame::blame_chunk_cb"); | |
930 | blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b, | |
931 | start_b + count_b, d->parent); | |
932 | d->offset = start_a + count_a - (start_b + count_b); | |
933 | return 0; | |
934 | } | |
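For example (made-up numbers), a hunk that replaces parent lines 20-24 with target lines 17-18 arrives as start_a == 20, count_a == 5, start_b == 17, count_b == 2; on entry d->offset must already be 20 - 17 == 3, blame_chunk() is told that target lines before 17 map to the parent shifted by that offset and that lines from 19 on are common again, and the offset carried to the next hunk becomes 25 - 19 == 6.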
935 | ||
936 | /* | |
937 | * We are looking at the origin 'target' and aiming to pass blame | |
938 | * for the lines it is suspected for on to its parent. Run diff to find | |
939 | * which lines came from the parent and pass blame for them. | |
940 | */ | |
941 | static void pass_blame_to_parent(struct blame_scoreboard *sb, | |
942 | struct blame_origin *target, | |
943 | struct blame_origin *parent) | |
944 | { | |
945 | mmfile_t file_p, file_o; | |
946 | struct blame_chunk_cb_data d; | |
947 | struct blame_entry *newdest = NULL; | |
948 | ||
949 | if (!target->suspects) | |
950 | return; /* nothing remains for this target */ | |
951 | ||
952 | d.parent = parent; | |
953 | d.offset = 0; | |
954 | d.dstq = &newdest; d.srcq = &target->suspects; | |
955 | ||
956 | fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob); | |
957 | fill_origin_blob(&sb->revs->diffopt, target, &file_o, &sb->num_read_blob); | |
958 | sb->num_get_patch++; | |
959 | ||
960 | if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts)) | |
961 | die("unable to generate diff (%s -> %s)", | |
962 | oid_to_hex(&parent->commit->object.oid), | |
963 | oid_to_hex(&target->commit->object.oid)); | |
964 | /* The rest are the same as the parent */ | |
965 | blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, parent); | |
966 | *d.dstq = NULL; | |
967 | queue_blames(sb, parent, newdest); | |
968 | ||
969 | return; | |
970 | } | |
971 | ||
972 | /* | |
973 | * The lines in blame_entry after splitting blames many times can become | |
974 | * very small and trivial, and at some point it becomes pointless to | |
975 | * blame the parents. E.g. "\t\t}\n\t}\n\n" appears everywhere in any | |
976 | * ordinary C program, and it is not worth saying it was copied from a | |
977 | * totally unrelated file in the parent. | |
978 | * | |
979 | * Compute how trivial the lines in the blame_entry are. | |
980 | */ | |
981 | unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e) | |
982 | { | |
983 | unsigned score; | |
984 | const char *cp, *ep; | |
985 | ||
986 | if (e->score) | |
987 | return e->score; | |
988 | ||
989 | score = 1; | |
990 | cp = blame_nth_line(sb, e->lno); | |
991 | ep = blame_nth_line(sb, e->lno + e->num_lines); | |
992 | while (cp < ep) { | |
993 | unsigned ch = *((unsigned char *)cp); | |
994 | if (isalnum(ch)) | |
995 | score++; | |
996 | cp++; | |
997 | } | |
998 | e->score = score; | |
999 | return score; | |
1000 | } | |
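For example, an entry holding only "\t\t}\n\t}\n" scores 1 (no alphanumeric characters), while one holding "int x = 1;\nreturn x;\n" scores 1 + 12 = 13; only sufficiently distinctive chunks are worth attributing to a moved or copied source.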
1001 | ||
1002 | /* | |
abeacb25 BW |
1003 | * best_so_far[] and potential[] are both a split of an existing blame_entry |
1004 | * that passes blame to the parent. Maintain best_so_far as the best split so | |
1005 | * far, by comparing potential and best_so_far and copying potential into | |
b543bb1c JS |
1006 | * best_so_far as needed. |
1007 | */ | |
1008 | static void copy_split_if_better(struct blame_scoreboard *sb, | |
1009 | struct blame_entry *best_so_far, | |
abeacb25 | 1010 | struct blame_entry *potential) |
b543bb1c JS |
1011 | { |
1012 | int i; | |
1013 | ||
abeacb25 | 1014 | if (!potential[1].suspect) |
b543bb1c JS |
1015 | return; |
1016 | if (best_so_far[1].suspect) { | |
abeacb25 BW |
1017 | if (blame_entry_score(sb, &potential[1]) < |
1018 | blame_entry_score(sb, &best_so_far[1])) | |
b543bb1c JS |
1019 | return; |
1020 | } | |
1021 | ||
1022 | for (i = 0; i < 3; i++) | |
abeacb25 | 1023 | blame_origin_incref(potential[i].suspect); |
b543bb1c | 1024 | decref_split(best_so_far); |
abeacb25 | 1025 | memcpy(best_so_far, potential, sizeof(struct blame_entry[3])); |
b543bb1c JS |
1026 | } |
1027 | ||
1028 | /* | |
1029 | * We are looking at a part of the final image represented by | |
1030 | * ent (tlno and same are offset by ent->s_lno). | |
1031 | * tlno is where we are looking in the final image. | |
1032 | * Lines up to (but not including) same match the preimage. | |
1033 | * plno is where we are looking in the preimage. | |
1034 | * | |
1035 | * <-------------- final image ----------------------> | |
1036 | * <------ent------> | |
1037 | * ^tlno ^same | |
1038 | * <---------preimage-----> | |
1039 | * ^plno | |
1040 | * | |
1041 | * All line numbers are 0-based. | |
1042 | */ | |
1043 | static void handle_split(struct blame_scoreboard *sb, | |
1044 | struct blame_entry *ent, | |
1045 | int tlno, int plno, int same, | |
1046 | struct blame_origin *parent, | |
1047 | struct blame_entry *split) | |
1048 | { | |
1049 | if (ent->num_lines <= tlno) | |
1050 | return; | |
1051 | if (tlno < same) { | |
abeacb25 | 1052 | struct blame_entry potential[3]; |
b543bb1c JS |
1053 | tlno += ent->s_lno; |
1054 | same += ent->s_lno; | |
abeacb25 BW |
1055 | split_overlap(potential, ent, tlno, plno, same, parent); |
1056 | copy_split_if_better(sb, split, potential); | |
1057 | decref_split(potential); | |
b543bb1c JS |
1058 | } |
1059 | } | |
1060 | ||
1061 | struct handle_split_cb_data { | |
1062 | struct blame_scoreboard *sb; | |
1063 | struct blame_entry *ent; | |
1064 | struct blame_origin *parent; | |
1065 | struct blame_entry *split; | |
1066 | long plno; | |
1067 | long tlno; | |
1068 | }; | |
1069 | ||
1070 | static int handle_split_cb(long start_a, long count_a, | |
1071 | long start_b, long count_b, void *data) | |
1072 | { | |
1073 | struct handle_split_cb_data *d = data; | |
1074 | handle_split(d->sb, d->ent, d->tlno, d->plno, start_b, d->parent, | |
1075 | d->split); | |
1076 | d->plno = start_a + count_a; | |
1077 | d->tlno = start_b + count_b; | |
1078 | return 0; | |
1079 | } | |
1080 | ||
1081 | /* | |
1082 | * Find the lines from parent that are the same as ent so that | |
1083 | * we can pass blames to it. file_p has the blob contents for | |
1084 | * the parent. | |
1085 | */ | |
1086 | static void find_copy_in_blob(struct blame_scoreboard *sb, | |
1087 | struct blame_entry *ent, | |
1088 | struct blame_origin *parent, | |
1089 | struct blame_entry *split, | |
1090 | mmfile_t *file_p) | |
1091 | { | |
1092 | const char *cp; | |
1093 | mmfile_t file_o; | |
1094 | struct handle_split_cb_data d; | |
1095 | ||
1096 | memset(&d, 0, sizeof(d)); | |
1097 | d.sb = sb; d.ent = ent; d.parent = parent; d.split = split; | |
1098 | /* | |
1099 | * Prepare mmfile that contains only the lines in ent. | |
1100 | */ | |
1101 | cp = blame_nth_line(sb, ent->lno); | |
1102 | file_o.ptr = (char *) cp; | |
1103 | file_o.size = blame_nth_line(sb, ent->lno + ent->num_lines) - cp; | |
1104 | ||
1105 | /* | |
1106 | * file_o is a part of final image we are annotating. | |
1107 | * file_p partially may match that image. | |
1108 | */ | |
1109 | memset(split, 0, sizeof(struct blame_entry [3])); | |
1110 | if (diff_hunks(file_p, &file_o, handle_split_cb, &d, sb->xdl_opts)) | |
1111 | die("unable to generate diff (%s)", | |
1112 | oid_to_hex(&parent->commit->object.oid)); | |
1113 | /* remainder, if any, all match the preimage */ | |
1114 | handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split); | |
1115 | } | |
1116 | ||
1117 | /* Move all blame entries from list *source whose score is not greater | |
1118 | * than score_min to the front of list *small. | |
1119 | * Returns a pointer to the link pointing to the old head of the small list. | |
1120 | */ | |
1121 | ||
1122 | static struct blame_entry **filter_small(struct blame_scoreboard *sb, | |
1123 | struct blame_entry **small, | |
1124 | struct blame_entry **source, | |
1125 | unsigned score_min) | |
1126 | { | |
1127 | struct blame_entry *p = *source; | |
1128 | struct blame_entry *oldsmall = *small; | |
1129 | while (p) { | |
1130 | if (blame_entry_score(sb, p) <= score_min) { | |
1131 | *small = p; | |
1132 | small = &p->next; | |
1133 | p = *small; | |
1134 | } else { | |
1135 | *source = p; | |
1136 | source = &p->next; | |
1137 | p = *source; | |
1138 | } | |
1139 | } | |
1140 | *small = oldsmall; | |
1141 | *source = NULL; | |
1142 | return small; | |
1143 | } | |
1144 | ||
1145 | /* | |
1146 | * See if the lines the target is currently suspected for can be | |
1147 | * attributed to the parent. | |
1148 | */ | |
1149 | static void find_move_in_parent(struct blame_scoreboard *sb, | |
1150 | struct blame_entry ***blamed, | |
1151 | struct blame_entry **toosmall, | |
1152 | struct blame_origin *target, | |
1153 | struct blame_origin *parent) | |
1154 | { | |
1155 | struct blame_entry *e, split[3]; | |
1156 | struct blame_entry *unblamed = target->suspects; | |
1157 | struct blame_entry *leftover = NULL; | |
1158 | mmfile_t file_p; | |
1159 | ||
1160 | if (!unblamed) | |
1161 | return; /* nothing remains for this target */ | |
1162 | ||
1163 | fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob); | |
1164 | if (!file_p.ptr) | |
1165 | return; | |
1166 | ||
1167 | /* At each iteration, unblamed has a NULL-terminated list of | |
1168 | * entries that have not yet been tested for blame. leftover | |
1169 | * contains the reversed list of entries that have been tested | |
1170 | * without being assignable to the parent. | |
1171 | */ | |
1172 | do { | |
1173 | struct blame_entry **unblamedtail = &unblamed; | |
1174 | struct blame_entry *next; | |
1175 | for (e = unblamed; e; e = next) { | |
1176 | next = e->next; | |
1177 | find_copy_in_blob(sb, e, parent, split, &file_p); | |
1178 | if (split[1].suspect && | |
1179 | sb->move_score < blame_entry_score(sb, &split[1])) { | |
1180 | split_blame(blamed, &unblamedtail, split, e); | |
1181 | } else { | |
1182 | e->next = leftover; | |
1183 | leftover = e; | |
1184 | } | |
1185 | decref_split(split); | |
1186 | } | |
1187 | *unblamedtail = NULL; | |
1188 | toosmall = filter_small(sb, toosmall, &unblamed, sb->move_score); | |
1189 | } while (unblamed); | |
1190 | target->suspects = reverse_blame(leftover, NULL); | |
1191 | } | |
1192 | ||
1193 | struct blame_list { | |
1194 | struct blame_entry *ent; | |
1195 | struct blame_entry split[3]; | |
1196 | }; | |
1197 | ||
1198 | /* | |
1199 | * Count the number of entries the target is suspected for, | |
1200 | * and prepare a list of entries and their best splits. | |
1201 | */ | |
1202 | static struct blame_list *setup_blame_list(struct blame_entry *unblamed, | |
1203 | int *num_ents_p) | |
1204 | { | |
1205 | struct blame_entry *e; | |
1206 | int num_ents, i; | |
1207 | struct blame_list *blame_list = NULL; | |
1208 | ||
1209 | for (e = unblamed, num_ents = 0; e; e = e->next) | |
1210 | num_ents++; | |
1211 | if (num_ents) { | |
1212 | blame_list = xcalloc(num_ents, sizeof(struct blame_list)); | |
1213 | for (e = unblamed, i = 0; e; e = e->next) | |
1214 | blame_list[i++].ent = e; | |
1215 | } | |
1216 | *num_ents_p = num_ents; | |
1217 | return blame_list; | |
1218 | } | |
1219 | ||
1220 | /* | |
1221 | * For lines target is suspected for, see if we can find code movement | |
1222 | * across a file boundary from the parent commit. porigin is the path | |
1223 | * in the parent we already tried. | |
1224 | */ | |
1225 | static void find_copy_in_parent(struct blame_scoreboard *sb, | |
1226 | struct blame_entry ***blamed, | |
1227 | struct blame_entry **toosmall, | |
1228 | struct blame_origin *target, | |
1229 | struct commit *parent, | |
1230 | struct blame_origin *porigin, | |
1231 | int opt) | |
1232 | { | |
1233 | struct diff_options diff_opts; | |
1234 | int i, j; | |
1235 | struct blame_list *blame_list; | |
1236 | int num_ents; | |
1237 | struct blame_entry *unblamed = target->suspects; | |
1238 | struct blame_entry *leftover = NULL; | |
1239 | ||
1240 | if (!unblamed) | |
1241 | return; /* nothing remains for this target */ | |
1242 | ||
1243 | diff_setup(&diff_opts); | |
0d1e0e78 | 1244 | diff_opts.flags.recursive = 1; |
b543bb1c JS |
1245 | diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT; |
1246 | ||
1247 | diff_setup_done(&diff_opts); | |
1248 | ||
1249 | /* Try "find copies harder" on new path if requested; | |
1250 | * we do not want to use diffcore_rename() actually to | |
1251 | * match things up; find_copies_harder is set only to | |
a6f38c10 | 1252 | * force diff_tree_oid() to feed all filepairs to diff_queue, |
b543bb1c JS |
1253 | * and this code needs to be after diff_setup_done(), which |
1254 | * usually makes find-copies-harder imply copy detection. | |
1255 | */ | |
1256 | if ((opt & PICKAXE_BLAME_COPY_HARDEST) | |
1257 | || ((opt & PICKAXE_BLAME_COPY_HARDER) | |
1258 | && (!porigin || strcmp(target->path, porigin->path)))) | |
0d1e0e78 | 1259 | diff_opts.flags.find_copies_harder = 1; |
b543bb1c JS |
1260 | |
1261 | if (is_null_oid(&target->commit->object.oid)) | |
583c6a22 | 1262 | do_diff_cache(&parent->tree->object.oid, &diff_opts); |
b543bb1c | 1263 | else |
a6f38c10 JH |
1264 | diff_tree_oid(&parent->tree->object.oid, |
1265 | &target->commit->tree->object.oid, | |
1266 | "", &diff_opts); | |
b543bb1c | 1267 | |
0d1e0e78 | 1268 | if (!diff_opts.flags.find_copies_harder) |
b543bb1c JS |
1269 | diffcore_std(&diff_opts); |
1270 | ||
1271 | do { | |
1272 | struct blame_entry **unblamedtail = &unblamed; | |
1273 | blame_list = setup_blame_list(unblamed, &num_ents); | |
1274 | ||
1275 | for (i = 0; i < diff_queued_diff.nr; i++) { | |
1276 | struct diff_filepair *p = diff_queued_diff.queue[i]; | |
1277 | struct blame_origin *norigin; | |
1278 | mmfile_t file_p; | |
abeacb25 | 1279 | struct blame_entry potential[3]; |
b543bb1c JS |
1280 | |
1281 | if (!DIFF_FILE_VALID(p->one)) | |
1282 | continue; /* does not exist in parent */ | |
1283 | if (S_ISGITLINK(p->one->mode)) | |
1284 | continue; /* ignore git links */ | |
1285 | if (porigin && !strcmp(p->one->path, porigin->path)) | |
1286 | /* find_move already dealt with this path */ | |
1287 | continue; | |
1288 | ||
1289 | norigin = get_origin(parent, p->one->path); | |
1290 | oidcpy(&norigin->blob_oid, &p->one->oid); | |
1291 | norigin->mode = p->one->mode; | |
1292 | fill_origin_blob(&sb->revs->diffopt, norigin, &file_p, &sb->num_read_blob); | |
1293 | if (!file_p.ptr) | |
1294 | continue; | |
1295 | ||
1296 | for (j = 0; j < num_ents; j++) { | |
1297 | find_copy_in_blob(sb, blame_list[j].ent, | |
abeacb25 | 1298 | norigin, potential, &file_p); |
b543bb1c | 1299 | copy_split_if_better(sb, blame_list[j].split, |
abeacb25 BW |
1300 | potential); |
1301 | decref_split(potential); | |
b543bb1c JS |
1302 | } |
1303 | blame_origin_decref(norigin); | |
1304 | } | |
1305 | ||
1306 | for (j = 0; j < num_ents; j++) { | |
1307 | struct blame_entry *split = blame_list[j].split; | |
1308 | if (split[1].suspect && | |
1309 | sb->copy_score < blame_entry_score(sb, &split[1])) { | |
1310 | split_blame(blamed, &unblamedtail, split, | |
1311 | blame_list[j].ent); | |
1312 | } else { | |
1313 | blame_list[j].ent->next = leftover; | |
1314 | leftover = blame_list[j].ent; | |
1315 | } | |
1316 | decref_split(split); | |
1317 | } | |
1318 | free(blame_list); | |
1319 | *unblamedtail = NULL; | |
1320 | toosmall = filter_small(sb, toosmall, &unblamed, sb->copy_score); | |
1321 | } while (unblamed); | |
1322 | target->suspects = reverse_blame(leftover, NULL); | |
1323 | diff_flush(&diff_opts); | |
1324 | clear_pathspec(&diff_opts.pathspec); | |
1325 | } | |
1326 | ||
1327 | /* | |
1328 | * The blobs of origin and porigin exactly match, so everything | |
1329 | * origin is suspected for can be blamed on the parent. | |
1330 | */ | |
1331 | static void pass_whole_blame(struct blame_scoreboard *sb, | |
1332 | struct blame_origin *origin, struct blame_origin *porigin) | |
1333 | { | |
1334 | struct blame_entry *e, *suspects; | |
1335 | ||
1336 | if (!porigin->file.ptr && origin->file.ptr) { | |
1337 | /* Steal its file */ | |
1338 | porigin->file = origin->file; | |
1339 | origin->file.ptr = NULL; | |
1340 | } | |
1341 | suspects = origin->suspects; | |
1342 | origin->suspects = NULL; | |
1343 | for (e = suspects; e; e = e->next) { | |
1344 | blame_origin_incref(porigin); | |
1345 | blame_origin_decref(e->suspect); | |
1346 | e->suspect = porigin; | |
1347 | } | |
1348 | queue_blames(sb, porigin, suspects); | |
1349 | } | |
1350 | ||
1351 | /* | |
1352 | * We pass blame from the current commit to its parents. We keep saying | |
1353 | * "parent" (and "porigin"), but what we mean is to find scapegoat to | |
1354 | * exonerate ourselves. | |
1355 | */ | |
1356 | static struct commit_list *first_scapegoat(struct rev_info *revs, struct commit *commit, | |
1357 | int reverse) | |
1358 | { | |
1359 | if (!reverse) { | |
1360 | if (revs->first_parent_only && | |
1361 | commit->parents && | |
1362 | commit->parents->next) { | |
1363 | free_commit_list(commit->parents->next); | |
1364 | commit->parents->next = NULL; | |
1365 | } | |
1366 | return commit->parents; | |
1367 | } | |
1368 | return lookup_decoration(&revs->children, &commit->object); | |
1369 | } | |
1370 | ||
1371 | static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reverse) | |
1372 | { | |
1373 | struct commit_list *l = first_scapegoat(revs, commit, reverse); | |
1374 | return commit_list_count(l); | |
1375 | } | |
1376 | ||
1377 | /* Distribute collected unsorted blames to the respective sorted lists | |
1378 | * in the various origins. | |
1379 | */ | |
1380 | static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed) | |
1381 | { | |
1382 | blamed = llist_mergesort(blamed, get_next_blame, set_next_blame, | |
1383 | compare_blame_suspect); | |
1384 | while (blamed) | |
1385 | { | |
1386 | struct blame_origin *porigin = blamed->suspect; | |
1387 | struct blame_entry *suspects = NULL; | |
1388 | do { | |
1389 | struct blame_entry *next = blamed->next; | |
1390 | blamed->next = suspects; | |
1391 | suspects = blamed; | |
1392 | blamed = next; | |
1393 | } while (blamed && blamed->suspect == porigin); | |
1394 | suspects = reverse_blame(suspects, NULL); | |
1395 | queue_blames(sb, porigin, suspects); | |
1396 | } | |
1397 | } | |
1398 | ||
1399 | #define MAXSG 16 | |
1400 | ||
1401 | static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt) | |
1402 | { | |
1403 | struct rev_info *revs = sb->revs; | |
1404 | int i, pass, num_sg; | |
1405 | struct commit *commit = origin->commit; | |
1406 | struct commit_list *sg; | |
1407 | struct blame_origin *sg_buf[MAXSG]; | |
1408 | struct blame_origin *porigin, **sg_origin = sg_buf; | |
1409 | struct blame_entry *toosmall = NULL; | |
1410 | struct blame_entry *blames, **blametail = &blames; | |
1411 | ||
1412 | num_sg = num_scapegoats(revs, commit, sb->reverse); | |
1413 | if (!num_sg) | |
1414 | goto finish; | |
1415 | else if (num_sg < ARRAY_SIZE(sg_buf)) | |
1416 | memset(sg_buf, 0, sizeof(sg_buf)); | |
1417 | else | |
1418 | sg_origin = xcalloc(num_sg, sizeof(*sg_origin)); | |
1419 | ||
1420 | /* | |
1421 | * The first pass looks for an unrenamed path to optimize for | |
1422 | * common cases, then we look for renames in the second pass. | |
1423 | */ | |
1424 | for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) { | |
1425 | struct blame_origin *(*find)(struct commit *, struct blame_origin *); | |
1426 | find = pass ? find_rename : find_origin; | |
1427 | ||
1428 | for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); | |
1429 | i < num_sg && sg; | |
1430 | sg = sg->next, i++) { | |
1431 | struct commit *p = sg->item; | |
1432 | int j, same; | |
1433 | ||
1434 | if (sg_origin[i]) | |
1435 | continue; | |
1436 | if (parse_commit(p)) | |
1437 | continue; | |
1438 | porigin = find(p, origin); | |
1439 | if (!porigin) | |
1440 | continue; | |
1441 | if (!oidcmp(&porigin->blob_oid, &origin->blob_oid)) { | |
1442 | pass_whole_blame(sb, origin, porigin); | |
1443 | blame_origin_decref(porigin); | |
1444 | goto finish; | |
1445 | } | |
1446 | for (j = same = 0; j < i; j++) | |
1447 | if (sg_origin[j] && | |
1448 | !oidcmp(&sg_origin[j]->blob_oid, &porigin->blob_oid)) { | |
1449 | same = 1; | |
1450 | break; | |
1451 | } | |
1452 | if (!same) | |
1453 | sg_origin[i] = porigin; | |
1454 | else | |
1455 | blame_origin_decref(porigin); | |
1456 | } | |
1457 | } | |
1458 | ||
1459 | sb->num_commits++; | |
1460 | for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); | |
1461 | i < num_sg && sg; | |
1462 | sg = sg->next, i++) { | |
1463 | struct blame_origin *porigin = sg_origin[i]; | |
1464 | if (!porigin) | |
1465 | continue; | |
1466 | if (!origin->previous) { | |
1467 | blame_origin_incref(porigin); | |
1468 | origin->previous = porigin; | |
1469 | } | |
1470 | pass_blame_to_parent(sb, origin, porigin); | |
1471 | if (!origin->suspects) | |
1472 | goto finish; | |
1473 | } | |
1474 | ||
1475 | /* | |
1476 | * Optionally find moves in parents' files. | |
1477 | */ | |
1478 | if (opt & PICKAXE_BLAME_MOVE) { | |
1479 | filter_small(sb, &toosmall, &origin->suspects, sb->move_score); | |
1480 | if (origin->suspects) { | |
1481 | for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); | |
1482 | i < num_sg && sg; | |
1483 | sg = sg->next, i++) { | |
1484 | struct blame_origin *porigin = sg_origin[i]; | |
1485 | if (!porigin) | |
1486 | continue; | |
1487 | find_move_in_parent(sb, &blametail, &toosmall, origin, porigin); | |
1488 | if (!origin->suspects) | |
1489 | break; | |
1490 | } | |
1491 | } | |
1492 | } | |
1493 | ||
1494 | /* | |
1495 | * Optionally find copies from parents' files. | |
1496 | */ | |
1497 | if (opt & PICKAXE_BLAME_COPY) { | |
1498 | if (sb->copy_score > sb->move_score) | |
1499 | filter_small(sb, &toosmall, &origin->suspects, sb->copy_score); | |
1500 | else if (sb->copy_score < sb->move_score) { | |
1501 | origin->suspects = blame_merge(origin->suspects, toosmall); | |
1502 | toosmall = NULL; | |
1503 | filter_small(sb, &toosmall, &origin->suspects, sb->copy_score); | |
1504 | } | |
1505 | if (!origin->suspects) | |
1506 | goto finish; | |
1507 | ||
1508 | for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse); | |
1509 | i < num_sg && sg; | |
1510 | sg = sg->next, i++) { | |
1511 | struct blame_origin *porigin = sg_origin[i]; | |
1512 | find_copy_in_parent(sb, &blametail, &toosmall, | |
1513 | origin, sg->item, porigin, opt); | |
1514 | if (!origin->suspects) | |
1515 | goto finish; | |
1516 | } | |
1517 | } | |
1518 | ||
1519 | finish: | |
1520 | *blametail = NULL; | |
1521 | distribute_blame(sb, blames); | |
1522 | /* | |
1523 | * prepend toosmall to origin->suspects | |
1524 | * | |
1525 | * There is no point in sorting: this ends up on a big | |
1526 | * unsorted list in the caller anyway. | |
1527 | */ | |
1528 | if (toosmall) { | |
1529 | struct blame_entry **tail = &toosmall; | |
1530 | while (*tail) | |
1531 | tail = &(*tail)->next; | |
1532 | *tail = origin->suspects; | |
1533 | origin->suspects = toosmall; | |
1534 | } | |
1535 | for (i = 0; i < num_sg; i++) { | |
1536 | if (sg_origin[i]) { | |
1537 | drop_origin_blob(sg_origin[i]); | |
1538 | blame_origin_decref(sg_origin[i]); | |
1539 | } | |
1540 | } | |
1541 | drop_origin_blob(origin); | |
1542 | if (sg_buf != sg_origin) | |
1543 | free(sg_origin); | |
1544 | } | |
1545 | ||
1546 | /* | |
1547 | * The main loop -- while we have blobs with lines whose true origin | |
1548 | * is still unknown, pick one blob, and allow its lines to pass blames | |
1549 | * to its parents. */ | |
1550 | void assign_blame(struct blame_scoreboard *sb, int opt) | |
1551 | { | |
1552 | struct rev_info *revs = sb->revs; | |
1553 | struct commit *commit = prio_queue_get(&sb->commits); | |
1554 | ||
1555 | while (commit) { | |
1556 | struct blame_entry *ent; | |
1557 | struct blame_origin *suspect = commit->util; | |
1558 | ||
1559 | /* find one suspect to break down */ | |
1560 | while (suspect && !suspect->suspects) | |
1561 | suspect = suspect->next; | |
1562 | ||
1563 | if (!suspect) { | |
1564 | commit = prio_queue_get(&sb->commits); | |
1565 | continue; | |
1566 | } | |
1567 | ||
1568 | assert(commit == suspect->commit); | |
1569 | ||
1570 | /* | |
1571 | * We will use this suspect later in the loop, | |
1572 | * so hold onto it in the meantime. | |
1573 | */ | |
1574 | blame_origin_incref(suspect); | |
1575 | parse_commit(commit); | |
1576 | if (sb->reverse || | |
1577 | (!(commit->object.flags & UNINTERESTING) && | |
1578 | !(revs->max_age != -1 && commit->date < revs->max_age))) | |
1579 | pass_blame(sb, suspect, opt); | |
1580 | else { | |
1581 | commit->object.flags |= UNINTERESTING; | |
1582 | if (commit->object.parsed) | |
1583 | mark_parents_uninteresting(commit); | |
1584 | } | |
1585 | /* treat root commit as boundary */ | |
1586 | if (!commit->parents && !sb->show_root) | |
1587 | commit->object.flags |= UNINTERESTING; | |
1588 | ||
1589 | /* Take responsibility for the remaining entries */ | |
1590 | ent = suspect->suspects; | |
1591 | if (ent) { | |
1592 | suspect->guilty = 1; | |
1593 | for (;;) { | |
1594 | struct blame_entry *next = ent->next; | |
1595 | if (sb->found_guilty_entry) | |
1596 | sb->found_guilty_entry(ent, sb->found_guilty_entry_data); | |
1597 | if (next) { | |
1598 | ent = next; | |
1599 | continue; | |
1600 | } | |
1601 | ent->next = sb->ent; | |
1602 | sb->ent = suspect->suspects; | |
1603 | suspect->suspects = NULL; | |
1604 | break; | |
1605 | } | |
1606 | } | |
1607 | blame_origin_decref(suspect); | |
1608 | ||
1609 | if (sb->debug) /* sanity */ | |
1610 | sanity_check_refcnt(sb); | |
1611 | } | |
1612 | } | |
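/*
 * Illustrative sketch (editor's addition, not part of blame.c): a
 * caller that wants incremental output can hook into the loop above by
 * registering the found_guilty_entry callback before assign_blame();
 * the names my_emit and my_state below are made up for the example:
 *
 *	static void my_emit(struct blame_entry *ent, void *data)
 *	{
 *		struct my_state *state = data;
 *		(report ent->suspect->commit, ent->lno, ent->num_lines ...)
 *	}
 *
 *	sb.found_guilty_entry = my_emit;
 *	sb.found_guilty_entry_data = &state;
 *	assign_blame(&sb, opt);
 */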
09002f1b JS |
1613 | |
1614 | static const char *get_next_line(const char *start, const char *end) | |
1615 | { | |
1616 | const char *nl = memchr(start, '\n', end - start); | |
1617 | return nl ? nl + 1 : end; | |
1618 | } | |
1619 | ||
1620 | /* | |
1621 | * To allow quick access to the contents of the nth line in the | |
1622 | * final image, prepare an index in the scoreboard. | |
1623 | */ | |
1624 | static int prepare_lines(struct blame_scoreboard *sb) | |
1625 | { | |
1626 | const char *buf = sb->final_buf; | |
1627 | unsigned long len = sb->final_buf_size; | |
1628 | const char *end = buf + len; | |
1629 | const char *p; | |
1630 | int *lineno; | |
1631 | int num = 0; | |
1632 | ||
1633 | for (p = buf; p < end; p = get_next_line(p, end)) | |
1634 | num++; | |
1635 | ||
1636 | ALLOC_ARRAY(sb->lineno, num + 1); | |
1637 | lineno = sb->lineno; | |
1638 | ||
1639 | for (p = buf; p < end; p = get_next_line(p, end)) | |
1640 | *lineno++ = p - buf; | |
1641 | ||
1642 | *lineno = len; | |
1643 | ||
1644 | sb->num_lines = num; | |
1645 | return sb->num_lines; | |
1646 | } | |
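/*
 * Illustrative note (editor's addition): after prepare_lines(),
 * sb->lineno[] holds the byte offset of each line's start within
 * sb->final_buf, plus one extra slot holding the total length, so the
 * nth line (0-based) can be read as:
 *
 *	const char *line = sb->final_buf + sb->lineno[n];
 *	long len = sb->lineno[n + 1] - sb->lineno[n];
 */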
1647 | ||
1648 | static struct commit *find_single_final(struct rev_info *revs, | |
1649 | const char **name_p) | |
1650 | { | |
1651 | int i; | |
1652 | struct commit *found = NULL; | |
1653 | const char *name = NULL; | |
1654 | ||
1655 | for (i = 0; i < revs->pending.nr; i++) { | |
1656 | struct object *obj = revs->pending.objects[i].item; | |
1657 | if (obj->flags & UNINTERESTING) | |
1658 | continue; | |
1659 | obj = deref_tag(obj, NULL, 0); | |
1660 | if (obj->type != OBJ_COMMIT) | |
1661 | die("Non commit %s?", revs->pending.objects[i].name); | |
1662 | if (found) | |
1663 | die("More than one commit to dig from %s and %s?", | |
1664 | revs->pending.objects[i].name, name); | |
1665 | found = (struct commit *)obj; | |
1666 | name = revs->pending.objects[i].name; | |
1667 | } | |
1668 | if (name_p) | |
9e7d8a9b | 1669 | *name_p = xstrdup_or_null(name); |
09002f1b JS |
1670 | return found; |
1671 | } | |
1672 | ||
1673 | static struct commit *dwim_reverse_initial(struct rev_info *revs, | |
1674 | const char **name_p) | |
1675 | { | |
1676 | /* | |
1677 | * DWIM "git blame --reverse ONE -- PATH" as | |
1678 | * "git blame --reverse ONE..HEAD -- PATH" but only do so | |
1679 | * when it makes sense. | |
1680 | */ | |
1681 | struct object *obj; | |
1682 | struct commit *head_commit; | |
583c6a22 | 1683 | struct object_id head_oid; |
09002f1b JS |
1684 | |
1685 | if (revs->pending.nr != 1) | |
1686 | return NULL; | |
1687 | ||
1688 | /* Is that sole rev a committish? */ | |
1689 | obj = revs->pending.objects[0].item; | |
1690 | obj = deref_tag(obj, NULL, 0); | |
1691 | if (obj->type != OBJ_COMMIT) | |
1692 | return NULL; | |
1693 | ||
1694 | /* Do we have HEAD? */ | |
49e61479 | 1695 | if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL)) |
09002f1b | 1696 | return NULL; |
583c6a22 | 1697 | head_commit = lookup_commit_reference_gently(&head_oid, 1); |
09002f1b JS |
1698 | if (!head_commit) |
1699 | return NULL; | |
1700 | ||
1701 | /* Turn "ONE" into "ONE..HEAD" then */ | |
1702 | obj->flags |= UNINTERESTING; | |
1703 | add_pending_object(revs, &head_commit->object, "HEAD"); | |
1704 | ||
1705 | if (name_p) | |
1706 | *name_p = revs->pending.objects[0].name; | |
1707 | return (struct commit *)obj; | |
1708 | } | |
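/*
 * Illustrative example (editor's addition): with the DWIM above,
 *
 *	git blame --reverse v2.0 -- some/file.c
 *
 * is treated like
 *
 *	git blame --reverse v2.0..HEAD -- some/file.c
 *
 * but only when the sole given rev is a committish and HEAD can be
 * resolved; otherwise the function returns NULL and nothing changes.
 */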
1709 | ||
1710 | static struct commit *find_single_initial(struct rev_info *revs, | |
1711 | const char **name_p) | |
1712 | { | |
1713 | int i; | |
1714 | struct commit *found = NULL; | |
1715 | const char *name = NULL; | |
1716 | ||
1717 | /* | |
1718 | * There must be one and only one negative commit, and it must be | |
1719 | * the boundary. | |
1720 | */ | |
1721 | for (i = 0; i < revs->pending.nr; i++) { | |
1722 | struct object *obj = revs->pending.objects[i].item; | |
1723 | if (!(obj->flags & UNINTERESTING)) | |
1724 | continue; | |
1725 | obj = deref_tag(obj, NULL, 0); | |
1726 | if (obj->type != OBJ_COMMIT) | |
1727 | die("Non commit %s?", revs->pending.objects[i].name); | |
1728 | if (found) | |
1729 | die("More than one commit to dig up from, %s and %s?", | |
1730 | revs->pending.objects[i].name, name); | |
1731 | found = (struct commit *) obj; | |
1732 | name = revs->pending.objects[i].name; | |
1733 | } | |
1734 | ||
1735 | if (!name) | |
1736 | found = dwim_reverse_initial(revs, &name); | |
1737 | if (!name) | |
1738 | die("No commit to dig up from?"); | |
1739 | ||
1740 | if (name_p) | |
9e7d8a9b | 1741 | *name_p = xstrdup(name); |
09002f1b JS |
1742 | return found; |
1743 | } | |
1744 | ||
1745 | void init_scoreboard(struct blame_scoreboard *sb) | |
1746 | { | |
1747 | memset(sb, 0, sizeof(struct blame_scoreboard)); | |
1748 | sb->move_score = BLAME_DEFAULT_MOVE_SCORE; | |
1749 | sb->copy_score = BLAME_DEFAULT_COPY_SCORE; | |
1750 | } | |
1751 | ||
1752 | void setup_scoreboard(struct blame_scoreboard *sb, const char *path, struct blame_origin **orig) | |
1753 | { | |
1754 | const char *final_commit_name = NULL; | |
1755 | struct blame_origin *o; | |
1756 | struct commit *final_commit = NULL; | |
1757 | enum object_type type; | |
1758 | ||
1759 | if (sb->reverse && sb->contents_from) | |
1760 | die(_("--contents and --reverse do not blend well.")); | |
1761 | ||
1762 | if (!sb->reverse) { | |
1763 | sb->final = find_single_final(sb->revs, &final_commit_name); | |
1764 | sb->commits.compare = compare_commits_by_commit_date; | |
1765 | } else { | |
1766 | sb->final = find_single_initial(sb->revs, &final_commit_name); | |
1767 | sb->commits.compare = compare_commits_by_reverse_commit_date; | |
1768 | } | |
1769 | ||
1770 | if (sb->final && sb->contents_from) | |
1771 | die(_("cannot use --contents with final commit object name")); | |
1772 | ||
1773 | if (sb->reverse && sb->revs->first_parent_only) | |
1774 | sb->revs->children.name = NULL; | |
1775 | ||
1776 | if (!sb->final) { | |
1777 | /* | |
1778 | * "--not A B -- path" without anything positive; | |
1779 | * do not default to HEAD, but use the working tree | |
1780 | * or "--contents". | |
1781 | */ | |
1782 | setup_work_tree(); | |
1783 | sb->final = fake_working_tree_commit(&sb->revs->diffopt, | |
1784 | path, sb->contents_from); | |
1785 | add_pending_object(sb->revs, &(sb->final->object), ":"); | |
1786 | } | |
1787 | ||
1788 | if (sb->reverse && sb->revs->first_parent_only) { | |
1789 | final_commit = find_single_final(sb->revs, NULL); | |
1790 | if (!final_commit) | |
1791 | die(_("--reverse and --first-parent together require specified latest commit")); | |
1792 | } | |
1793 | ||
1794 | /* | |
1795 | * If we have bottom commits, this will mark the ancestors of the | |
1796 | * bottom commits we would reach while traversing as | |
1797 | * uninteresting. | |
1798 | */ | |
1799 | if (prepare_revision_walk(sb->revs)) | |
1800 | die(_("revision walk setup failed")); | |
1801 | ||
1802 | if (sb->reverse && sb->revs->first_parent_only) { | |
1803 | struct commit *c = final_commit; | |
1804 | ||
1805 | sb->revs->children.name = "children"; | |
1806 | while (c->parents && | |
1807 | oidcmp(&c->object.oid, &sb->final->object.oid)) { | |
1808 | struct commit_list *l = xcalloc(1, sizeof(*l)); | |
1809 | ||
1810 | l->item = c; | |
1811 | if (add_decoration(&sb->revs->children, | |
1812 | &c->parents->item->object, l)) | |
1813 | die("BUG: not unique item in first-parent chain"); | |
1814 | c = c->parents->item; | |
1815 | } | |
1816 | ||
1817 | if (oidcmp(&c->object.oid, &sb->final->object.oid)) | |
1818 | die(_("--reverse --first-parent together require range along first-parent chain")); | |
1819 | } | |
1820 | ||
1821 | if (is_null_oid(&sb->final->object.oid)) { | |
1822 | o = sb->final->util; | |
1823 | sb->final_buf = xmemdupz(o->file.ptr, o->file.size); | |
1824 | sb->final_buf_size = o->file.size; | |
1825 | } | |
1826 | else { | |
1827 | o = get_origin(sb->final, path); | |
1828 | if (fill_blob_sha1_and_mode(o)) | |
1829 | die(_("no such path %s in %s"), path, final_commit_name); | |
1830 | ||
0d1e0e78 | 1831 | if (sb->revs->diffopt.flags.allow_textconv && |
09002f1b JS |
1832 | textconv_object(path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf, |
1833 | &sb->final_buf_size)) | |
1834 | ; | |
1835 | else | |
b4f5aca4 | 1836 | sb->final_buf = read_object_file(&o->blob_oid, &type, |
1837 | &sb->final_buf_size); | |
09002f1b JS |
1838 | |
1839 | if (!sb->final_buf) | |
1840 | die(_("cannot read blob %s for path %s"), | |
1841 | oid_to_hex(&o->blob_oid), | |
1842 | path); | |
1843 | } | |
1844 | sb->num_read_blob++; | |
1845 | prepare_lines(sb); | |
1846 | ||
1847 | if (orig) | |
1848 | *orig = o; | |
9e7d8a9b SG |
1849 | |
1850 | free((char *)final_commit_name); | |
09002f1b | 1851 | } |
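/*
 * Illustrative sketch (editor's addition, not part of blame.c): a
 * caller such as builtin/blame.c typically drives this API roughly as
 * follows; `revs`, `path` and `opt` are assumed to be prepared
 * elsewhere:
 *
 *	struct blame_scoreboard sb;
 *	struct blame_origin *o;
 *
 *	init_scoreboard(&sb);
 *	sb.revs = &revs;
 *	setup_scoreboard(&sb, path, &o);
 *	(seed o->suspects with the line ranges to blame)
 *	assign_blame(&sb, opt);
 *	(walk sb.ent to report the results)
 */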
bd481de7 JS |
1852 | |
1853 | ||
1854 | ||
1855 | struct blame_entry *blame_entry_prepend(struct blame_entry *head, | |
1856 | long start, long end, | |
1857 | struct blame_origin *o) | |
1858 | { | |
1859 | struct blame_entry *new_head = xcalloc(1, sizeof(struct blame_entry)); | |
1860 | new_head->lno = start; | |
1861 | new_head->num_lines = end - start; | |
1862 | new_head->suspect = o; | |
1863 | new_head->s_lno = start; | |
1864 | new_head->next = head; | |
1865 | blame_origin_incref(o); | |
1866 | return new_head; | |
1867 | } |
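/*
 * Illustrative sketch (editor's addition): a caller seeds a blame run
 * by prepending one entry per requested line range onto the final
 * origin's suspect list before calling assign_blame(); `start` and
 * `end` here stand for a hypothetical 0-based [start, end) range:
 *
 *	struct blame_entry *ent = NULL;
 *
 *	ent = blame_entry_prepend(ent, start, end, o);
 *	o->suspects = ent;
 *	prio_queue_put(&sb.commits, o->commit);
 */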