/*
 * Helper functions for tree diff generation
 */

#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "diff.h"
#include "diffcore.h"
#include "hash.h"
#include "tree.h"
#include "tree-walk.h"
#include "environment.h"
#include "repository.h"

/*
 * Some mode bits are also used internally for computations.
 *
 * They *must* not overlap with any valid modes, and they *must* not be emitted
 * to the outside world - i.e. appear on disk or network. In other words, they
 * are temporary fields which we use internally, but they have to stay in-house.
 *
 * ( such an approach is valid, as standard S_IF* fits into 16 bits, and in the
 *   Git codebase mode is `unsigned int`, which is assumed to be at least 32 bits )
 */

#define S_DIFFTREE_IFXMIN_NEQ	0x80000000

/*
 * internal mode marker, saying a tree entry != entry of tp[imin]
 * (see ll_diff_tree_paths for what it means there)
 *
 * we will update/use/emit entry for diff only with it unset.
 */
#define S_IFXMIN_NEQ	S_DIFFTREE_IFXMIN_NEQ

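/*
 * Scratch-array helpers: nparent is 1 for a plain diff and stays small for
 * typical merges, so arrays of up to two elements are carved out on the
 * stack via xalloca(); larger ones fall back to a regular heap allocation.
 */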
#define FAST_ARRAY_ALLOC(x, nr) do { \
	if ((nr) <= 2) \
		(x) = xalloca((nr) * sizeof(*(x))); \
	else \
		ALLOC_ARRAY((x), nr); \
} while(0)
#define FAST_ARRAY_FREE(x, nr) do { \
	if ((nr) <= 2) \
		xalloca_free((x)); \
	else \
		free((x)); \
} while(0)

static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt,
	int depth);
static void ll_diff_tree_oid(const struct object_id *old_oid,
			     const struct object_id *new_oid,
			     struct strbuf *base, struct diff_options *opt);

/*
 * Compare two tree entries, taking into account only path/S_ISDIR(mode),
 * but not their sha1's.
 *
 * NOTE files and directories *always* compare differently, even when having
 *      the same name - thanks to base_name_compare().
 *
 * NOTE empty (=invalid) descriptor(s) take part in comparison as +infty,
 *      so that they sort *after* valid tree entries.
 *
 *      Due to this convention, if trees are scanned in sorted order, all
 *      non-empty descriptors will be processed first.
 */
static int tree_entry_pathcmp(struct tree_desc *t1, struct tree_desc *t2)
{
	struct name_entry *e1, *e2;
	int cmp;

	/* empty descriptors sort after valid tree entries */
	if (!t1->size)
		return t2->size ? 1 : 0;
	else if (!t2->size)
		return -1;

	e1 = &t1->entry;
	e2 = &t2->entry;
	cmp = base_name_compare(e1->path, tree_entry_len(e1), e1->mode,
				e2->path, tree_entry_len(e2), e2->mode);
	return cmp;
}


/*
 * convert path -> opt->diff_*() callbacks
 *
 * emits diff to first parent only, and tells diff tree-walker that we are done
 * with p and it can be freed.
 */
static int emit_diff_first_parent_only(struct diff_options *opt, struct combine_diff_path *p)
{
	struct combine_diff_parent *p0 = &p->parent[0];
	if (p->mode && p0->mode) {
		opt->change(opt, p0->mode, p->mode, &p0->oid, &p->oid,
			1, 1, p->path, 0, 0);
	}
	else {
		const struct object_id *oid;
		unsigned int mode;
		int addremove;

		if (p->mode) {
			addremove = '+';
			oid = &p->oid;
			mode = p->mode;
		} else {
			addremove = '-';
			oid = &p0->oid;
			mode = p0->mode;
		}

		opt->add_remove(opt, addremove, mode, oid, 1, p->path, 0);
	}

	return 0;	/* we are done with p */
}


/*
 * Make a new combine_diff_path from path/mode/sha1
 * and append it to paths list tail.
 *
 * Memory for created elements could be reused:
 *
 *	- if last->next == NULL, the memory is allocated;
 *
 *	- if last->next != NULL, it is assumed that p=last->next was returned
 *	  earlier by this function, and p->next was *not* modified.
 *	  The memory is then reused from p.
 *
 * so for clients,
 *
 * - if you do need to keep the element
 *
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p->next = NULL;
 *
 * - if you don't need to keep the element after processing
 *
 *	pprev = p;
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p = pprev;
 *	; don't forget to free tail->next in the end
 *
 * p->parent[] remains uninitialized.
 */
static struct combine_diff_path *path_appendnew(struct combine_diff_path *last,
	int nparent, const struct strbuf *base, const char *path, int pathlen,
	unsigned mode, const struct object_id *oid)
{
	struct combine_diff_path *p;
	size_t len = st_add(base->len, pathlen);
	size_t alloclen = combine_diff_path_size(nparent, len);

	/* if last->next is !NULL - it is a pre-allocated memory, we can reuse */
	p = last->next;
	if (p && (alloclen > (intptr_t)p->next)) {
		FREE_AND_NULL(p);
	}

	if (!p) {
		p = xmalloc(alloclen);

		/*
		 * until we go to it next round, .next holds how many bytes we
		 * allocated (for faster realloc - we don't need copying old data).
		 */
		p->next = (struct combine_diff_path *)(intptr_t)alloclen;
	}

	last->next = p;

	p->path = (char *)&(p->parent[nparent]);
	memcpy(p->path, base->buf, base->len);
	memcpy(p->path + base->len, path, pathlen);
	p->path[len] = 0;
	p->mode = mode;
	oidcpy(&p->oid, oid ? oid : null_oid());

	return p;
}

/*
 * a new path should be added to the combine diff
 *
 * 3 cases on how/when it should be called and behaves:
 *
 *	 t, !tp		-> path added, all parents lack it
 *	!t,  tp		-> path removed from all parents
 *	 t,  tp		-> path modified/added
 *			   (M for tp[i]=tp[imin], A otherwise)
 */
static struct combine_diff_path *emit_path(struct combine_diff_path *p,
	struct strbuf *base, struct diff_options *opt, int nparent,
	struct tree_desc *t, struct tree_desc *tp,
	int imin, int depth)
{
	unsigned short mode;
	const char *path;
	const struct object_id *oid;
	int pathlen;
	int old_baselen = base->len;
	int i, isdir, recurse = 0, emitthis = 1;

	/* at least something has to be valid */
	assert(t || tp);

	if (t) {
		/* path present in resulting tree */
		oid = tree_entry_extract(t, &path, &mode);
		pathlen = tree_entry_len(&t->entry);
		isdir = S_ISDIR(mode);
	} else {
		/*
		 * a path was removed - take path from imin parent. Also take
		 * mode from that parent, to decide on recursion(1).
		 *
		 * 1) all modes for tp[i]=tp[imin] should be the same wrt
		 *    S_ISDIR, thanks to base_name_compare().
		 */
		tree_entry_extract(&tp[imin], &path, &mode);
		pathlen = tree_entry_len(&tp[imin].entry);

		isdir = S_ISDIR(mode);
		oid = NULL;
		mode = 0;
	}

	if (opt->flags.recursive && isdir) {
		recurse = 1;
		emitthis = opt->flags.tree_in_recursive;
	}

	if (emitthis) {
		int keep;
		struct combine_diff_path *pprev = p;
		p = path_appendnew(p, nparent, base, path, pathlen, mode, oid);

		for (i = 0; i < nparent; ++i) {
			/*
			 * tp[i] is valid, if present and if tp[i]==tp[imin] -
			 * otherwise, we should ignore it.
			 */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			const struct object_id *oid_i;
			unsigned mode_i;

			p->parent[i].status =
				!t ? DIFF_STATUS_DELETED :
					tpi_valid ?
						DIFF_STATUS_MODIFIED :
						DIFF_STATUS_ADDED;

			if (tpi_valid) {
				oid_i = &tp[i].entry.oid;
				mode_i = tp[i].entry.mode;
			}
			else {
				oid_i = null_oid();
				mode_i = 0;
			}

			p->parent[i].mode = mode_i;
			oidcpy(&p->parent[i].oid, oid_i);
		}

		keep = 1;
		if (opt->pathchange)
			keep = opt->pathchange(opt, p);

		/*
		 * If a path was filtered or consumed - we don't need to add it
		 * to the list and can reuse its memory, leaving it as
		 * pre-allocated element on the tail.
		 *
		 * On the other hand, if path needs to be kept, we need to
		 * correct its .next to NULL, as it was pre-initialized to how
		 * much memory was allocated.
		 *
		 * see path_appendnew() for details.
		 */
		if (!keep)
			p = pprev;
		else
			p->next = NULL;
	}

	if (recurse) {
		const struct object_id **parents_oid;

		FAST_ARRAY_ALLOC(parents_oid, nparent);
		for (i = 0; i < nparent; ++i) {
			/* same rule as in emitthis */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			parents_oid[i] = tpi_valid ? &tp[i].entry.oid : NULL;
		}

		strbuf_add(base, path, pathlen);
		strbuf_addch(base, '/');
		p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt,
				       depth + 1);
		FAST_ARRAY_FREE(parents_oid, nparent);
	}

	strbuf_setlen(base, old_baselen);
	return p;
}

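/*
 * Fast-forward a tree descriptor past entries that cannot match the
 * pathspec; stop at the first interesting entry, or clear the descriptor
 * entirely once tree_entry_interesting() says nothing further can match.
 */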
static void skip_uninteresting(struct tree_desc *t, struct strbuf *base,
			       struct diff_options *opt)
{
	enum interesting match;

	while (t->size) {
		match = tree_entry_interesting(opt->repo->index, &t->entry,
					       base, &opt->pathspec);
		if (match) {
			if (match == all_entries_not_interesting)
				t->size = 0;
			break;
		}
		update_tree_entry(t);
	}
}


/*
 * generate paths for combined diff D(sha1,parents_oid[])
 *
 * Resulting paths are appended to the combine_diff_path linked list, and also
 * emitted on the go via the opt->pathchange() callback, so it is possible to
 * process the result as a batch or incrementally.
 *
 * The paths are generated by scanning the new tree and all parent trees
 * simultaneously, similarly to what diff_tree() was doing for 2 trees.
 * The theory behind such a scan is as follows:
 *
 *
 * D(T,P1...Pn) calculation scheme
 * -------------------------------
 *
 * D(T,P1...Pn) = D(T,P1) ^ ... ^ D(T,Pn)	(regarding resulting paths set)
 *
 *	D(T,Pj)		- diff between T..Pj
 *	D(T,P1...Pn)	- combined diff from T to parents P1,...,Pn
 *
 *
 * We start from all trees, which are sorted, and compare their entries in
 * lock-step:
 *
 *	 T     P1       Pn
 *	 -     -         -
 *	|t|   |p1|      |pn|
 *	|-|   |--| ...  |--|	imin = argmin(p1...pn)
 *	| |   |  |      |  |
 *	|-|   |--|      |--|
 *	|.|   |. |      |. |
 *	 .     .         .
 *	 .     .         .
 *
 * at any time there could be 3 cases:
 *
 *	1)  t < p[imin];
 *	2)  t > p[imin];
 *	3)  t = p[imin].
 *
 * Schematic deduction of what every case means, and what to do, follows:
 *
 * 1)  t < p[imin]  ->  ∀j t ∉ Pj  ->  "+t" ∈ D(T,Pj)  ->  D += "+t";  t↓
 *
 * 2)  t > p[imin]
 *
 *     2.1) ∃j: pj > p[imin]  ->  "-p[imin]" ∉ D(T,Pj)  ->  D += ø;  ∀ pi=p[imin]  pi↓
 *     2.2) ∀i  pi = p[imin]  ->  pi ∉ T  ->  "-pi" ∈ D(T,Pi)  ->  D += "-p[imin]";  ∀i pi↓
 *
 * 3)  t = p[imin]
 *
 *     3.1) ∃j: pj > p[imin]  ->  "+t" ∈ D(T,Pj)  ->  only pi=p[imin] remains to investigate
 *     3.2) pi = p[imin]  ->  investigate δ(t,pi)
 *
 *     3.1+3.2) looking at δ(t,pi) ∀i: pi=p[imin] - if all != ø  ->
 *
 *                       ⎧δ(t,pi)  - if pi=p[imin]
 *              ->  D += ⎨
 *                       ⎩"+t"     - if pi>p[imin]
 *
 *
 *     in any case t↓  ∀ pi=p[imin]  pi↓
 *
 *
 * ~~~~~~~~
 * NOTE
 *
 *	Usual diff D(A,B) is by definition the same as combined diff D(A,[B]),
 *	so this diff paths generator can be, and is, used for plain diff
 *	generation too.
 *
 *	Please pay attention to the common D(A,[B]) case when working on the
 *	code, in order not to slow it down.
 *
 * NOTE
 *	nparent must be > 0.
 */

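/*
 * For illustration: with two parents P1 and P2 and an entry "foo.c" present
 * in T, P1 and P2 alike, we are in case 3. If T's "foo.c" is identical (oid
 * and mode) to the entry in either parent, nothing is emitted and the path
 * does not enter the combined diff (unless find_copies_harder is in effect);
 * only when it differs from both parents does emit_path() record one
 * combine_diff_path whose parent[0] and parent[1] slots carry the per-parent
 * status, oid and mode.
 */
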
/* ∀ pi=p[imin]  pi↓ */
static inline void update_tp_entries(struct tree_desc *tp, int nparent)
{
	int i;
	for (i = 0; i < nparent; ++i)
		if (!(tp[i].entry.mode & S_IFXMIN_NEQ))
			update_tree_entry(&tp[i]);
}

static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt,
	int depth)
{
	struct tree_desc t, *tp;
	void *ttree, **tptree;
	int i;

	if (depth > max_allowed_tree_depth)
		die("exceeded maximum allowed tree depth");

	FAST_ARRAY_ALLOC(tp, nparent);
	FAST_ARRAY_ALLOC(tptree, nparent);

	/*
	 * load parents first, as they are probably already cached.
	 *
	 * ( log_tree_diff() parses commit->parent before calling here via
	 *   diff_tree_oid(parent, commit) )
	 */
	for (i = 0; i < nparent; ++i)
		tptree[i] = fill_tree_descriptor(opt->repo, &tp[i], parents_oid[i]);
	ttree = fill_tree_descriptor(opt->repo, &t, oid);

	/* Enable recursion indefinitely */
	opt->pathspec.recursive = opt->flags.recursive;

	for (;;) {
		int imin, cmp;

		if (diff_can_quit_early(opt))
			break;

		if (opt->max_changes && diff_queued_diff.nr > opt->max_changes)
			break;

		if (opt->pathspec.nr) {
			skip_uninteresting(&t, base, opt);
			for (i = 0; i < nparent; i++)
				skip_uninteresting(&tp[i], base, opt);
		}

		/* comparing is finished when all trees are done */
		if (!t.size) {
			int done = 1;
			for (i = 0; i < nparent; ++i)
				if (tp[i].size) {
					done = 0;
					break;
				}
			if (done)
				break;
		}

		/*
		 * lookup imin = argmin(p1...pn),
		 * mark entries whether they =p[imin] along the way
		 */
		imin = 0;
		tp[0].entry.mode &= ~S_IFXMIN_NEQ;

		for (i = 1; i < nparent; ++i) {
			cmp = tree_entry_pathcmp(&tp[i], &tp[imin]);
			if (cmp < 0) {
				imin = i;
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else if (cmp == 0) {
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else {
				tp[i].entry.mode |= S_IFXMIN_NEQ;
			}
		}

		/* fixup markings for entries before imin */
		for (i = 0; i < imin; ++i)
			tp[i].entry.mode |= S_IFXMIN_NEQ;	/* pi > p[imin] */

		/* compare t vs p[imin] */
		cmp = tree_entry_pathcmp(&t, &tp[imin]);

		/* t = p[imin] */
		if (cmp == 0) {
			/* are either pi > p[imin] or diff(t,pi) != ø ? */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i) {
					/* p[i] > p[imin] */
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						continue;

					/* diff(t,pi) != ø */
					if (!oideq(&t.entry.oid, &tp[i].entry.oid) ||
					    (t.entry.mode != tp[i].entry.mode))
						continue;

					goto skip_emit_t_tp;
				}
			}

			/* D += {δ(t,pi) if pi=p[imin];  "+a" if pi > p[imin]} */
			p = emit_path(p, base, opt, nparent,
					&t, tp, imin, depth);

		skip_emit_t_tp:
			/* t↓,  ∀ pi=p[imin]  pi↓ */
			update_tree_entry(&t);
			update_tp_entries(tp, nparent);
		}

		/* t < p[imin] */
		else if (cmp < 0) {
			/* D += "+t" */
			p = emit_path(p, base, opt, nparent,
					&t, /*tp=*/NULL, -1, depth);

			/* t↓ */
			update_tree_entry(&t);
		}

		/* t > p[imin] */
		else {
			/* ∀i pi=p[imin]  -> D += "-p[imin]" */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i)
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						goto skip_emit_tp;
			}

			p = emit_path(p, base, opt, nparent,
					/*t=*/NULL, tp, imin, depth);

		skip_emit_tp:
			/* ∀ pi=p[imin]  pi↓ */
			update_tp_entries(tp, nparent);
		}
	}

	free(ttree);
	for (i = nparent-1; i >= 0; i--)
		free(tptree[i]);
	FAST_ARRAY_FREE(tptree, nparent);
	FAST_ARRAY_FREE(tp, nparent);

	return p;
}

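/*
 * Generate the combine_diff_path list for oid against parents_oid[],
 * appending to the list whose tail is p; the spare pre-allocated element
 * that path_appendnew() may have left on the tail is freed before
 * returning (see path_appendnew() above).
 */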
struct combine_diff_path *diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt)
{
	p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt, 0);

	/*
	 * free pre-allocated last element, if any
	 * (see path_appendnew() for details about why)
	 */
	FREE_AND_NULL(p->next);

	return p;
}

/*
 * Does it look like the resulting diff might be due to a rename?
 * - single entry
 * - not a valid previous file
 */
static inline int diff_might_be_rename(void)
{
	return diff_queued_diff.nr == 1 &&
		!DIFF_FILE_VALID(diff_queued_diff.queue[0]->one);
}

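/*
 * --follow support: the diff so far looks like the sole interesting path
 * was created out of nowhere, so redo it with rename/copy detection across
 * the full trees; if the path turns out to be a rename/copy destination,
 * swap in that filepair and rewrite opt->pathspec to the source path so
 * the caller keeps following the older name.
 */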
static void try_to_follow_renames(const struct object_id *old_oid,
				  const struct object_id *new_oid,
				  struct strbuf *base, struct diff_options *opt)
{
	struct diff_options diff_opts;
	struct diff_queue_struct *q = &diff_queued_diff;
	struct diff_filepair *choice;
	int i;

	/*
	 * follow-rename code is very specific; we need exactly one
	 * path. Magic that matches more than one path is not
	 * supported.
	 */
	GUARD_PATHSPEC(&opt->pathspec, PATHSPEC_FROMTOP | PATHSPEC_LITERAL);
#if 0
	/*
	 * We should reject wildcards as well. Unfortunately we
	 * haven't got a reliable way to detect that 'foo\*bar' in
	 * fact has no wildcards. nowildcard_len is merely a hint for
	 * optimization. Let it slip for now until wildmatch is taught
	 * about dry-run mode and returns wildcard info.
	 */
	if (opt->pathspec.has_wildcard)
		BUG("wildcards are not supported");
#endif

	/* Remove the file creation entry from the diff queue, and remember it */
	choice = q->queue[0];
	q->nr = 0;

	repo_diff_setup(opt->repo, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.flags.find_copies_harder = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = opt->pathspec.items[0].match;
	diff_opts.break_opt = opt->break_opt;
	diff_opts.rename_score = opt->rename_score;
	diff_setup_done(&diff_opts);
	ll_diff_tree_oid(old_oid, new_oid, base, &diff_opts);
	diffcore_std(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);

	/* Go through the new set of file pairings, and see if we find a more interesting one */
	opt->found_follow = 0;
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];

		/*
		 * Found a source? Not only do we use that for the new
		 * diff_queued_diff, we will also use that as the path in
		 * the future!
		 */
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, opt->pathspec.items[0].match)) {
			const char *path[2];

			/* Switch the file-pairs around */
			q->queue[i] = choice;
			choice = p;

			/* Update the path we use from now on.. */
			path[0] = p->one->path;
			path[1] = NULL;
			clear_pathspec(&opt->pathspec);
			parse_pathspec(&opt->pathspec,
				       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
				       PATHSPEC_LITERAL_PATH, "", path);

			/*
			 * The caller expects us to return a set of vanilla
			 * filepairs, letting a later call to diffcore_std()
			 * that it makes sort the renames out (among other
			 * things), but we have already found renames
			 * ourselves; signal diffcore_std() not to muck with
			 * rename information.
			 */
			opt->found_follow = 1;
			break;
		}
	}

	/*
	 * Then, discard all the non-relevant file pairs...
	 */
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		diff_free_filepair(p);
	}

	/*
	 * .. and re-instate the one we want (which might be either the
	 * original one, or the rename/copy we found)
	 */
	q->queue[0] = choice;
	q->nr = 1;
}

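/*
 * Plain two-tree diff: drive the combined-diff walker with a single
 * parent (old_oid) and route every resulting path through the ordinary
 * diff callbacks via emit_diff_first_parent_only().
 */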
static void ll_diff_tree_oid(const struct object_id *old_oid,
			     const struct object_id *new_oid,
			     struct strbuf *base, struct diff_options *opt)
{
	struct combine_diff_path phead, *p;
	pathchange_fn_t pathchange_old = opt->pathchange;

	phead.next = NULL;
	opt->pathchange = emit_diff_first_parent_only;
	diff_tree_paths(&phead, new_oid, &old_oid, 1, base, opt);

	for (p = phead.next; p;) {
		struct combine_diff_path *pprev = p;
		p = p->next;
		free(pprev);
	}

	opt->pathchange = pathchange_old;
}

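/*
 * Diff two trees rooted at base_str. When diffing from the top level with
 * rename following enabled, fall back to try_to_follow_renames() if the
 * result looks like it may stem from a rename.
 */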
void diff_tree_oid(const struct object_id *old_oid,
		   const struct object_id *new_oid,
		   const char *base_str, struct diff_options *opt)
{
	struct strbuf base;

	strbuf_init(&base, PATH_MAX);
	strbuf_addstr(&base, base_str);

	ll_diff_tree_oid(old_oid, new_oid, &base, opt);
	if (!*base_str && opt->flags.follow_renames && diff_might_be_rename())
		try_to_follow_renames(old_oid, new_oid, &base, opt);

	strbuf_release(&base);
}

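/* Diff a root commit's tree against the empty tree (old_oid == NULL). */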
void diff_root_tree_oid(const struct object_id *new_oid,
			const char *base,
			struct diff_options *opt)
{
	diff_tree_oid(NULL, new_oid, base, opt);
}