/*
 * Helper functions for tree diff generation
 */
#include "git-compat-util.h"
#include "diff.h"
#include "diffcore.h"
#include "hash.h"
#include "tree.h"
#include "tree-walk.h"

/*
 * Some mode bits are also used internally for computations.
 *
 * They *must* not overlap with any valid modes, and they *must* not be emitted
 * to the outside world - i.e. appear on disk or on the network. In other
 * words, these are temporary fields, used only internally, and they have to
 * stay in-house.
 *
 * (Such an approach is valid, as the standard S_IF* bits fit into 16 bits, and
 * in the Git codebase a mode is an `unsigned int`, which is assumed to be at
 * least 32 bits.)
 */

#define S_DIFFTREE_IFXMIN_NEQ	0x80000000

/*
 * internal mode marker, saying a tree entry != entry of tp[imin]
 * (see ll_diff_tree_paths for what it means there)
 *
 * we will update/use/emit the entry for the diff only with it unset.
 */
#define S_IFXMIN_NEQ	S_DIFFTREE_IFXMIN_NEQ

#define FAST_ARRAY_ALLOC(x, nr) do { \
	if ((nr) <= 2) \
		(x) = xalloca((nr) * sizeof(*(x))); \
	else \
		ALLOC_ARRAY((x), nr); \
} while(0)
#define FAST_ARRAY_FREE(x, nr) do { \
	if ((nr) <= 2) \
		xalloca_free((x)); \
	else \
		free((x)); \
} while(0)
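
/*
 * Illustrative sketch (not referenced by the code below): FAST_ARRAY_ALLOC
 * and FAST_ARRAY_FREE must be paired with the same `nr`, because that value
 * decides whether the memory came from the stack (xalloca, nr <= 2 - the
 * common case of one or two parents) or from the heap (ALLOC_ARRAY).
 * A typical use, as in ll_diff_tree_paths() further down:
 *
 *	struct tree_desc *tp;
 *
 *	FAST_ARRAY_ALLOC(tp, nparent);
 *	... use tp[0] .. tp[nparent-1] ...
 *	FAST_ARRAY_FREE(tp, nparent);
 */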

static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt);
static void ll_diff_tree_oid(const struct object_id *old_oid,
			     const struct object_id *new_oid,
			     struct strbuf *base, struct diff_options *opt);

/*
 * Compare two tree entries, taking into account only path/S_ISDIR(mode),
 * but not their sha1's.
 *
 * NOTE files and directories *always* compare differently, even when they
 * have the same name - thanks to base_name_compare().
 *
 * NOTE empty (=invalid) descriptor(s) take part in the comparison as +infty,
 * so that they sort *after* valid tree entries.
 *
 * Due to this convention, if trees are scanned in sorted order, all
 * non-empty descriptors will be processed first.
 */
static int tree_entry_pathcmp(struct tree_desc *t1, struct tree_desc *t2)
{
	struct name_entry *e1, *e2;
	int cmp;

	/* empty descriptors sort after valid tree entries */
	if (!t1->size)
		return t2->size ? 1 : 0;
	else if (!t2->size)
		return -1;

	e1 = &t1->entry;
	e2 = &t2->entry;
	cmp = base_name_compare(e1->path, tree_entry_len(e1), e1->mode,
				e2->path, tree_entry_len(e2), e2->mode);
	return cmp;
}
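
/*
 * A small illustration of the resulting ordering (a sketch, assuming the
 * usual base_name_compare() semantics of comparing a tree "foo" as if it
 * were "foo/"):
 *
 *	blob "foo"    <  tree "foo"	('\0' < '/': the blob sorts first,
 *					 and the two never compare equal)
 *	blob "foo.h"  <  tree "foo"	('.'  < '/')
 *	tree "foo"    <  blob "foo0"	('/'  < '0')
 *
 * and a descriptor whose entries are exhausted (size == 0) sorts after all
 * of the above.
 */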


/*
 * convert path -> opt->diff_*() callbacks
 *
 * Emits the diff to the first parent only, and tells the diff tree-walker
 * that we are done with p and it can be freed.
 */
static int emit_diff_first_parent_only(struct diff_options *opt, struct combine_diff_path *p)
{
	struct combine_diff_parent *p0 = &p->parent[0];
	if (p->mode && p0->mode) {
		opt->change(opt, p0->mode, p->mode, &p0->oid, &p->oid,
			1, 1, p->path, 0, 0);
	}
	else {
		const struct object_id *oid;
		unsigned int mode;
		int addremove;

		if (p->mode) {
			addremove = '+';
			oid = &p->oid;
			mode = p->mode;
		} else {
			addremove = '-';
			oid = &p0->oid;
			mode = p0->mode;
		}

		opt->add_remove(opt, addremove, mode, oid, 1, p->path, 0);
	}

	return 0;	/* we are done with p */
}


/*
 * Make a new combine_diff_path from path/mode/sha1
 * and append it to the tail of the paths list.
 *
 * Memory for created elements can be reused:
 *
 *	- if last->next == NULL, the memory is allocated;
 *
 *	- if last->next != NULL, it is assumed that p=last->next was returned
 *	  earlier by this function, and p->next was *not* modified.
 *	  The memory is then reused from p.
 *
 * so for clients,
 *
 * - if you do need to keep the element
 *
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p->next = NULL;
 *
 * - if you don't need to keep the element after processing
 *
 *	pprev = p;
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p = pprev;
 *	; don't forget to free tail->next at the end
 *
 * p->parent[] remains uninitialized.
 */
static struct combine_diff_path *path_appendnew(struct combine_diff_path *last,
	int nparent, const struct strbuf *base, const char *path, int pathlen,
	unsigned mode, const struct object_id *oid)
{
	struct combine_diff_path *p;
	size_t len = st_add(base->len, pathlen);
	size_t alloclen = combine_diff_path_size(nparent, len);

	/* if last->next is not NULL, it is pre-allocated memory that we can reuse */
	p = last->next;
	if (p && (alloclen > (intptr_t)p->next)) {
		FREE_AND_NULL(p);
	}

	if (!p) {
		p = xmalloc(alloclen);

		/*
		 * until we get to it next round, .next holds how many bytes we
		 * allocated (for a faster realloc - we don't need to copy the
		 * old data).
		 */
		p->next = (struct combine_diff_path *)(intptr_t)alloclen;
	}

	last->next = p;

	p->path = (char *)&(p->parent[nparent]);
	memcpy(p->path, base->buf, base->len);
	memcpy(p->path + base->len, path, pathlen);
	p->path[len] = 0;
	p->mode = mode;
	oidcpy(&p->oid, oid ? oid : null_oid());

	return p;
}
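
/*
 * A minimal sketch of the calling convention described above (it mirrors
 * what emit_path() does; `process` stands for any consumer, such as the
 * opt->pathchange() callback):
 *
 *	pprev = p;
 *	p = path_appendnew(p, nparent, base, path, pathlen, mode, oid);
 *	fill in p->parent[0..nparent-1];
 *	keep = process(p);
 *	if (!keep)
 *		p = pprev;	// leave the element pre-allocated on the tail
 *	else
 *		p->next = NULL;	// it was holding the allocation size
 *
 * and, once the whole walk is done, free the possibly still pre-allocated
 * tail element - diff_tree_paths() below does exactly that with
 * FREE_AND_NULL(p->next).
 */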

/*
 * A new path should be added to the combine diff.
 *
 * 3 cases on how/when it should be called and behaves:
 *
 *	 t, !tp		-> path added, all parents lack it
 *	!t,  tp		-> path removed from all parents
 *	 t,  tp		-> path modified/added
 *			   (M for tp[i]=tp[imin], A otherwise)
 */
static struct combine_diff_path *emit_path(struct combine_diff_path *p,
	struct strbuf *base, struct diff_options *opt, int nparent,
	struct tree_desc *t, struct tree_desc *tp,
	int imin)
{
	unsigned short mode;
	const char *path;
	const struct object_id *oid;
	int pathlen;
	int old_baselen = base->len;
	int i, isdir, recurse = 0, emitthis = 1;

	/* at least something has to be valid */
	assert(t || tp);

	if (t) {
		/* path present in resulting tree */
		oid = tree_entry_extract(t, &path, &mode);
		pathlen = tree_entry_len(&t->entry);
		isdir = S_ISDIR(mode);
	} else {
		/*
		 * a path was removed - take the path from the imin parent.
		 * Also take the mode from that parent, to decide on
		 * recursion (1).
		 *
		 * 1) all modes for tp[i]=tp[imin] should be the same wrt
		 *    S_ISDIR, thanks to base_name_compare().
		 */
		tree_entry_extract(&tp[imin], &path, &mode);
		pathlen = tree_entry_len(&tp[imin].entry);

		isdir = S_ISDIR(mode);
		oid = NULL;
		mode = 0;
	}

	if (opt->flags.recursive && isdir) {
		recurse = 1;
		emitthis = opt->flags.tree_in_recursive;
	}

	if (emitthis) {
		int keep;
		struct combine_diff_path *pprev = p;
		p = path_appendnew(p, nparent, base, path, pathlen, mode, oid);

		for (i = 0; i < nparent; ++i) {
			/*
			 * tp[i] is valid if it is present and tp[i]==tp[imin];
			 * otherwise we should ignore it.
			 */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			const struct object_id *oid_i;
			unsigned mode_i;

			p->parent[i].status =
				!t ? DIFF_STATUS_DELETED :
					tpi_valid ?
						DIFF_STATUS_MODIFIED :
						DIFF_STATUS_ADDED;

			if (tpi_valid) {
				oid_i = &tp[i].entry.oid;
				mode_i = tp[i].entry.mode;
			}
			else {
				oid_i = null_oid();
				mode_i = 0;
			}

			p->parent[i].mode = mode_i;
			oidcpy(&p->parent[i].oid, oid_i);
		}

		keep = 1;
		if (opt->pathchange)
			keep = opt->pathchange(opt, p);

		/*
		 * If the path was filtered or consumed, we don't need to add
		 * it to the list and can reuse its memory, leaving it as a
		 * pre-allocated element on the tail.
		 *
		 * On the other hand, if the path needs to be kept, we need to
		 * reset its .next to NULL, as it was pre-initialized to how
		 * much memory was allocated.
		 *
		 * see path_appendnew() for details.
		 */
		if (!keep)
			p = pprev;
		else
			p->next = NULL;
	}

	if (recurse) {
		const struct object_id **parents_oid;

		FAST_ARRAY_ALLOC(parents_oid, nparent);
		for (i = 0; i < nparent; ++i) {
			/* same rule as in emitthis */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			parents_oid[i] = tpi_valid ? &tp[i].entry.oid : NULL;
		}

		strbuf_add(base, path, pathlen);
		strbuf_addch(base, '/');
		p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt);
		FAST_ARRAY_FREE(parents_oid, nparent);
	}

	strbuf_setlen(base, old_baselen);
	return p;
}

static void skip_uninteresting(struct tree_desc *t, struct strbuf *base,
			       struct diff_options *opt)
{
	enum interesting match;

	while (t->size) {
		match = tree_entry_interesting(opt->repo->index, &t->entry,
					       base, 0, &opt->pathspec);
		if (match) {
			if (match == all_entries_not_interesting)
				t->size = 0;
			break;
		}
		update_tree_entry(t);
	}
}


/*
 * generate paths for combined diff D(sha1,parents_oid[])
 *
 * Resulting paths are appended to the combine_diff_path linked list, and are
 * also emitted on the go via the opt->pathchange() callback, so it is possible
 * to process the result as a batch or incrementally.
 *
 * The paths are generated by scanning the new tree and all parent trees
 * simultaneously, similarly to what diff_tree() does for 2 trees.
 * The theory behind such a scan is as follows:
 *
 *
 * D(T,P1...Pn) calculation scheme
 * -------------------------------
 *
 * D(T,P1...Pn) = D(T,P1) ^ ... ^ D(T,Pn)	(regarding resulting paths set)
 *
 *	D(T,Pj)		- diff between T..Pj
 *	D(T,P1...Pn)	- combined diff from T to parents P1,...,Pn
 *
 *
 * We start from all trees, which are sorted, and compare their entries in
 * lock-step:
 *
 *     T     P1       Pn
 *     -     -        -
 *    |t|   |p1|     |pn|
 *    |-|   |--| ... |--|	imin = argmin(p1...pn)
 *    | |   |  |     |  |
 *    |-|   |--|     |--|
 *    |.|   |. |     |. |
 *     .     .        .
 *     .     .        .
 *
 * at any time there could be 3 cases:
 *
 *	1)  t < p[imin];
 *	2)  t > p[imin];
 *	3)  t = p[imin].
 *
 * Schematic deduction of what every case means, and what to do, follows:
 *
 * 1)  t < p[imin]  ->  ∀j t ∉ Pj  ->  "+t" ∈ D(T,Pj)  ->  D += "+t";  t↓
 *
 * 2)  t > p[imin]
 *
 *     2.1) ∃j: pj > p[imin]  ->  "-p[imin]" ∉ D(T,Pj)  ->  D += ø;  ∀ pi=p[imin]  pi↓
 *     2.2) ∀i  pi = p[imin]  ->  pi ∉ T  ->  "-pi" ∈ D(T,Pi)  ->  D += "-p[imin]";  ∀i pi↓
 *
 * 3)  t = p[imin]
 *
 *     3.1) ∃j: pj > p[imin]  ->  "+t" ∈ D(T,Pj)  ->  only pi=p[imin] remains to investigate
 *     3.2) pi = p[imin]  ->  investigate δ(t,pi)
 *      |
 *      |
 *      v
 *
 *     3.1+3.2) looking at δ(t,pi) ∀i: pi=p[imin] - if all != ø  ->
 *
 *                       ⎧δ(t,pi)  - if pi=p[imin]
 *              ->  D += ⎨
 *                       ⎩"+t"     - if pi>p[imin]
 *
 *
 *     in any case t↓  ∀ pi=p[imin]  pi↓
 *
 *
 * ~~~~~~~~
 *
 * NOTE
 *
 * The usual diff D(A,B) is by definition the same as the combined diff
 * D(A,[B]), so this diff path generator can be, and is, used for plain diff
 * generation too.
 *
 * Please pay attention to the common D(A,[B]) case when working on the
 * code, in order not to slow it down.
 *
 * NOTE
 * nparent must be > 0.
 */
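
/*
 * Illustrative walk-through of the scheme above (a sketch with made-up tree
 * contents): let T = {a, c}, P1 = {a, b}, P2 = {a, c}, all entries blobs.
 *
 *	step 1: t=a, p1=a, p2=a     -> case 3, every pi=p[imin]; if δ(a,pi)=ø
 *		for some such pi, nothing is emitted (skip_emit_t_tp);
 *		t↓, p1↓, p2↓
 *	step 2: t=c, p1=b, p2=c     -> p[imin] is b (from P1); t > p[imin]
 *		and p2 > p[imin], so "-b" ∉ D(T,P2) -> D += ø (case 2.1);
 *		only p1 advances
 *	step 3: t=c, p1 empty, p2=c -> the exhausted p1 sorts as +infty, so
 *		p[imin] is c (from P2); t = p[imin] (case 3); "+c" ∈ D(T,P1),
 *		so "c" is emitted unless δ(c,p2)=ø, in which case it drops out
 *		of the combined diff as well
 *
 * (With find_copies_harder set, the "δ=ø" shortcuts are skipped and such
 * paths are emitted anyway.)
 */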


/* ∀ pi=p[imin]  pi↓ */
static inline void update_tp_entries(struct tree_desc *tp, int nparent)
{
	int i;
	for (i = 0; i < nparent; ++i)
		if (!(tp[i].entry.mode & S_IFXMIN_NEQ))
			update_tree_entry(&tp[i]);
}

static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt)
{
	struct tree_desc t, *tp;
	void *ttree, **tptree;
	int i;

	FAST_ARRAY_ALLOC(tp, nparent);
	FAST_ARRAY_ALLOC(tptree, nparent);

	/*
	 * load parents first, as they are probably already cached.
	 *
	 * (log_tree_diff() parses commit->parent before calling here via
	 * diff_tree_oid(parent, commit).)
	 */
	for (i = 0; i < nparent; ++i)
		tptree[i] = fill_tree_descriptor(opt->repo, &tp[i], parents_oid[i]);
	ttree = fill_tree_descriptor(opt->repo, &t, oid);

	/* Enable recursion indefinitely */
	opt->pathspec.recursive = opt->flags.recursive;

	for (;;) {
		int imin, cmp;

		if (diff_can_quit_early(opt))
			break;

		if (opt->max_changes && diff_queued_diff.nr > opt->max_changes)
			break;

		if (opt->pathspec.nr) {
			skip_uninteresting(&t, base, opt);
			for (i = 0; i < nparent; i++)
				skip_uninteresting(&tp[i], base, opt);
		}

		/* comparing is finished when all the trees are done */
		if (!t.size) {
			int done = 1;
			for (i = 0; i < nparent; ++i)
				if (tp[i].size) {
					done = 0;
					break;
				}
			if (done)
				break;
		}

		/*
		 * lookup imin = argmin(p1...pn),
		 * mark entries whether they =p[imin] along the way
		 */
		imin = 0;
		tp[0].entry.mode &= ~S_IFXMIN_NEQ;

		for (i = 1; i < nparent; ++i) {
			cmp = tree_entry_pathcmp(&tp[i], &tp[imin]);
			if (cmp < 0) {
				imin = i;
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else if (cmp == 0) {
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else {
				tp[i].entry.mode |= S_IFXMIN_NEQ;
			}
		}

		/* fixup markings for entries before imin */
		for (i = 0; i < imin; ++i)
			tp[i].entry.mode |= S_IFXMIN_NEQ;	/* pi > p[imin] */


		/* compare t vs p[imin] */
		cmp = tree_entry_pathcmp(&t, &tp[imin]);

		/* t = p[imin] */
		if (cmp == 0) {
			/* are either pi > p[imin] or diff(t,pi) != ø ? */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i) {
					/* p[i] > p[imin] */
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						continue;

					/* diff(t,pi) != ø */
					if (!oideq(&t.entry.oid, &tp[i].entry.oid) ||
					    (t.entry.mode != tp[i].entry.mode))
						continue;

					goto skip_emit_t_tp;
				}
			}

			/* D += {δ(t,pi) if pi=p[imin];  "+t" if pi > p[imin]} */
			p = emit_path(p, base, opt, nparent,
					&t, tp, imin);

		skip_emit_t_tp:
			/* t↓,  ∀ pi=p[imin]  pi↓ */
			update_tree_entry(&t);
			update_tp_entries(tp, nparent);
		}

		/* t < p[imin] */
		else if (cmp < 0) {
			/* D += "+t" */
			p = emit_path(p, base, opt, nparent,
					&t, /*tp=*/NULL, -1);

			/* t↓ */
			update_tree_entry(&t);
		}

		/* t > p[imin] */
		else {
			/* ∀i pi=p[imin]  ->  D += "-p[imin]" */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i)
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						goto skip_emit_tp;
			}

			p = emit_path(p, base, opt, nparent,
					/*t=*/NULL, tp, imin);

		skip_emit_tp:
			/* ∀ pi=p[imin]  pi↓ */
			update_tp_entries(tp, nparent);
		}
	}

	free(ttree);
	for (i = nparent-1; i >= 0; i--)
		free(tptree[i]);
	FAST_ARRAY_FREE(tptree, nparent);
	FAST_ARRAY_FREE(tp, nparent);

	return p;
}

struct combine_diff_path *diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt)
{
	p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt);

	/*
	 * free pre-allocated last element, if any
	 * (see path_appendnew() for details about why)
	 */
	FREE_AND_NULL(p->next);

	return p;
}
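
/*
 * A sketch of how a caller can drive diff_tree_paths() for an n-parent
 * (combined) diff; ll_diff_tree_oid() below is the single-parent instance of
 * the same pattern (commit_oid, parents_oid, nparent and opt are placeholders
 * supplied by the caller):
 *
 *	struct combine_diff_path phead, *paths;
 *	struct strbuf base = STRBUF_INIT;
 *
 *	phead.next = NULL;
 *	diff_tree_paths(&phead, &commit_oid, parents_oid, nparent, &base, opt);
 *	paths = phead.next;	// linked list of changed paths, may be NULL
 *	... walk and free the list, or rely on opt->pathchange() having
 *	    consumed the entries on the fly ...
 */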

/*
 * Does it look like the resulting diff might be due to a rename?
 *  - single entry
 *  - not a valid previous file
 */
static inline int diff_might_be_rename(void)
{
	return diff_queued_diff.nr == 1 &&
		!DIFF_FILE_VALID(diff_queued_diff.queue[0]->one);
}

static void try_to_follow_renames(const struct object_id *old_oid,
				  const struct object_id *new_oid,
				  struct strbuf *base, struct diff_options *opt)
{
	struct diff_options diff_opts;
	struct diff_queue_struct *q = &diff_queued_diff;
	struct diff_filepair *choice;
	int i;

	/*
	 * The follow-rename code is very specific; we need exactly one
	 * path. Magic that matches more than one path is not
	 * supported.
	 */
	GUARD_PATHSPEC(&opt->pathspec, PATHSPEC_FROMTOP | PATHSPEC_LITERAL);
#if 0
	/*
	 * We should reject wildcards as well. Unfortunately we
	 * haven't got a reliable way to detect that 'foo\*bar' in
	 * fact has no wildcards. nowildcard_len is merely a hint for
	 * optimization. Let it slip for now until wildmatch is taught
	 * about dry-run mode and returns wildcard info.
	 */
	if (opt->pathspec.has_wildcard)
		BUG("wildcards are not supported");
#endif

	/* Remove the file creation entry from the diff queue, and remember it */
	choice = q->queue[0];
	q->nr = 0;

	repo_diff_setup(opt->repo, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.flags.find_copies_harder = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = opt->pathspec.items[0].match;
	diff_opts.break_opt = opt->break_opt;
	diff_opts.rename_score = opt->rename_score;
	diff_setup_done(&diff_opts);
	ll_diff_tree_oid(old_oid, new_oid, base, &diff_opts);
	diffcore_std(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);

	/* Go through the new set of file pairs, and see if we find a more interesting one */
	opt->found_follow = 0;
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];

		/*
		 * Found a source? Not only do we use that for the new
		 * diff_queued_diff, we will also use that as the path in
		 * the future!
		 */
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, opt->pathspec.items[0].match)) {
			const char *path[2];

			/* Switch the file-pairs around */
			q->queue[i] = choice;
			choice = p;

			/* Update the path we use from now on.. */
			path[0] = p->one->path;
			path[1] = NULL;
			clear_pathspec(&opt->pathspec);
			parse_pathspec(&opt->pathspec,
				       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
				       PATHSPEC_LITERAL_PATH, "", path);

			/*
			 * The caller expects us to return a set of vanilla
			 * filepairs so that a later call it makes to
			 * diffcore_std() can sort the renames out (among
			 * other things), but we have already found the
			 * renames ourselves; signal diffcore_std() not to
			 * muck with rename information.
			 */
			opt->found_follow = 1;
			break;
		}
	}

	/*
	 * Then, discard all the non-relevant file pairs...
	 */
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		diff_free_filepair(p);
	}

	/*
	 * .. and re-instate the one we want (which might be either the
	 * original one, or the rename/copy we found)
	 */
	q->queue[0] = choice;
	q->nr = 1;
}

static void ll_diff_tree_oid(const struct object_id *old_oid,
			     const struct object_id *new_oid,
			     struct strbuf *base, struct diff_options *opt)
{
	struct combine_diff_path phead, *p;
	pathchange_fn_t pathchange_old = opt->pathchange;

	phead.next = NULL;
	opt->pathchange = emit_diff_first_parent_only;
	diff_tree_paths(&phead, new_oid, &old_oid, 1, base, opt);

	for (p = phead.next; p;) {
		struct combine_diff_path *pprev = p;
		p = p->next;
		free(pprev);
	}

	opt->pathchange = pathchange_old;
}

void diff_tree_oid(const struct object_id *old_oid,
		   const struct object_id *new_oid,
		   const char *base_str, struct diff_options *opt)
{
	struct strbuf base;

	strbuf_init(&base, PATH_MAX);
	strbuf_addstr(&base, base_str);

	ll_diff_tree_oid(old_oid, new_oid, &base, opt);
	if (!*base_str && opt->flags.follow_renames && diff_might_be_rename())
		try_to_follow_renames(old_oid, new_oid, &base, opt);

	strbuf_release(&base);
}

void diff_root_tree_oid(const struct object_id *new_oid,
			const char *base,
			struct diff_options *opt)
{
	diff_tree_oid(NULL, new_oid, base, opt);
}
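
/*
 * A sketch of how the public entry points above are typically driven (the
 * option values and the old/new tree oids are illustrative; see diff.h for
 * the full set of options):
 *
 *	struct diff_options opt;
 *
 *	repo_diff_setup(the_repository, &opt);
 *	opt.output_format = DIFF_FORMAT_PATCH;
 *	opt.flags.recursive = 1;
 *	diff_setup_done(&opt);
 *
 *	diff_tree_oid(&old_tree_oid, &new_tree_oid, "", &opt);
 *	diffcore_std(&opt);	// rename detection, pickaxe, ordering, ...
 *	diff_flush(&opt);
 *
 * diff_root_tree_oid() works the same way, with the "old" side taken to be
 * an empty tree, so every path shows up as added.
 */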