[git/gitster.git] / tree-diff.c
/*
 * Helper functions for tree diff generation
 */
#include "git-compat-util.h"
#include "diff.h"
#include "diffcore.h"
#include "hash.h"
#include "tree.h"
#include "tree-walk.h"
#include "environment.h"
#include "repository.h"

/*
 * Some mode bits are also used internally for computations.
 *
 * They *must* not overlap with any valid modes, and they *must* not be emitted
 * to the outside world - i.e. appear on disk or network. In other words, these
 * are temporary fields which we use internally, and they have to stay in-house.
 *
 * (such an approach is valid, as the standard S_IF* bits fit into 16 bits, and
 *  in the Git codebase a mode is an `unsigned int`, which is assumed to be at
 *  least 32 bits)
 */
#define S_DIFFTREE_IFXMIN_NEQ 0x80000000

/*
 * Internal mode marker, saying a tree entry != entry of tp[imin]
 * (see ll_diff_tree_paths for what it means there).
 *
 * We will update/use/emit an entry for the diff only with it unset.
 */
#define S_IFXMIN_NEQ S_DIFFTREE_IFXMIN_NEQ

#define FAST_ARRAY_ALLOC(x, nr) do { \
	if ((nr) <= 2) \
		(x) = xalloca((nr) * sizeof(*(x))); \
	else \
		ALLOC_ARRAY((x), nr); \
} while(0)
#define FAST_ARRAY_FREE(x, nr) do { \
	if ((nr) <= 2) \
		xalloca_free((x)); \
	else \
		free((x)); \
} while(0)
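/*
 * Note: nparent is 1 for a plain two-tree diff and 2 for an ordinary merge,
 * so the "(nr) <= 2" fast path above keeps these per-call arrays on the
 * stack via xalloca() and only falls back to heap allocation for octopus
 * merges with more parents.
 */
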
static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt,
	int depth);
static void ll_diff_tree_oid(const struct object_id *old_oid,
			     const struct object_id *new_oid,
			     struct strbuf *base, struct diff_options *opt);

/*
 * Compare two tree entries, taking into account only path/S_ISDIR(mode),
 * but not their sha1's.
 *
 * NOTE files and directories *always* compare differently, even when having
 *      the same name - thanks to base_name_compare().
 *
 * NOTE empty (=invalid) descriptor(s) take part in comparison as +infty,
 *      so that they sort *after* valid tree entries.
 *
 *      Due to this convention, if trees are scanned in sorted order, all
 *      non-empty descriptors will be processed first.
 */
static int tree_entry_pathcmp(struct tree_desc *t1, struct tree_desc *t2)
{
	struct name_entry *e1, *e2;
	int cmp;

	/* empty descriptors sort after valid tree entries */
	if (!t1->size)
		return t2->size ? 1 : 0;
	else if (!t2->size)
		return -1;

	e1 = &t1->entry;
	e2 = &t2->entry;
	cmp = base_name_compare(e1->path, tree_entry_len(e1), e1->mode,
				e2->path, tree_entry_len(e2), e2->mode);
	return cmp;
}

/*
 * convert path -> opt->diff_*() callbacks
 *
 * emits diff to first parent only, and tells diff tree-walker that we are done
 * with p and it can be freed.
 */
static int emit_diff_first_parent_only(struct diff_options *opt, struct combine_diff_path *p)
{
	struct combine_diff_parent *p0 = &p->parent[0];
	if (p->mode && p0->mode) {
		opt->change(opt, p0->mode, p->mode, &p0->oid, &p->oid,
			    1, 1, p->path, 0, 0);
	}
	else {
		const struct object_id *oid;
		unsigned int mode;
		int addremove;

		if (p->mode) {
			addremove = '+';
			oid = &p->oid;
			mode = p->mode;
		} else {
			addremove = '-';
			oid = &p0->oid;
			mode = p0->mode;
		}

		opt->add_remove(opt, addremove, mode, oid, 1, p->path, 0);
	}

	return 0;	/* we are done with p */
}

/*
 * Make a new combine_diff_path from path/mode/sha1
 * and append it to the paths list tail.
 *
 * Memory for created elements can be reused:
 *
 *	- if last->next == NULL, the memory is allocated;
 *
 *	- if last->next != NULL, it is assumed that p=last->next was returned
 *	  earlier by this function, and p->next was *not* modified.
 *	  The memory is then reused from p.
 *
 * So for clients:
 *
 * - if you do need to keep the element
 *
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p->next = NULL;
 *
 * - if you don't need to keep the element after processing
 *
 *	pprev = p;
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p = pprev;
 *	; don't forget to free tail->next in the end
 *
 * p->parent[] remains uninitialized.
 */
static struct combine_diff_path *path_appendnew(struct combine_diff_path *last,
	int nparent, const struct strbuf *base, const char *path, int pathlen,
	unsigned mode, const struct object_id *oid)
{
	struct combine_diff_path *p;
	size_t len = st_add(base->len, pathlen);
	size_t alloclen = combine_diff_path_size(nparent, len);

	/* if last->next is not NULL, it is pre-allocated memory we can reuse */
	p = last->next;
	if (p && (alloclen > (intptr_t)p->next)) {
		FREE_AND_NULL(p);
	}

	if (!p) {
		p = xmalloc(alloclen);

		/*
		 * until we go to it next round, .next holds how many bytes we
		 * allocated (for faster realloc - we don't need to copy the
		 * old data).
		 */
		p->next = (struct combine_diff_path *)(intptr_t)alloclen;
	}

	last->next = p;

	p->path = (char *)&(p->parent[nparent]);
	memcpy(p->path, base->buf, base->len);
	memcpy(p->path + base->len, path, pathlen);
	p->path[len] = 0;
	p->mode = mode;
	oidcpy(&p->oid, oid ? oid : null_oid());

	return p;
}

/*
 * A new path should be added to the combined diff.
 *
 * 3 cases on how/when it should be called and behaves:
 *
 *	 t, !tp		-> path added, all parents lack it
 *	!t,  tp		-> path removed from all parents
 *	 t,  tp		-> path modified/added
 *			   (M for tp[i]=tp[imin], A otherwise)
 */
static struct combine_diff_path *emit_path(struct combine_diff_path *p,
	struct strbuf *base, struct diff_options *opt, int nparent,
	struct tree_desc *t, struct tree_desc *tp,
	int imin, int depth)
{
	unsigned short mode;
	const char *path;
	const struct object_id *oid;
	int pathlen;
	int old_baselen = base->len;
	int i, isdir, recurse = 0, emitthis = 1;

	/* at least something has to be valid */
	assert(t || tp);

	if (t) {
		/* path present in resulting tree */
		oid = tree_entry_extract(t, &path, &mode);
		pathlen = tree_entry_len(&t->entry);
		isdir = S_ISDIR(mode);
	} else {
		/*
		 * a path was removed - take path from imin parent. Also take
		 * mode from that parent, to decide on recursion(1).
		 *
		 * 1) all modes for tp[i]=tp[imin] should be the same wrt
		 *    S_ISDIR, thanks to base_name_compare().
		 */
		tree_entry_extract(&tp[imin], &path, &mode);
		pathlen = tree_entry_len(&tp[imin].entry);

		isdir = S_ISDIR(mode);
		oid = NULL;
		mode = 0;
	}

	if (opt->flags.recursive && isdir) {
		recurse = 1;
		emitthis = opt->flags.tree_in_recursive;
	}

	if (emitthis) {
		int keep;
		struct combine_diff_path *pprev = p;
		p = path_appendnew(p, nparent, base, path, pathlen, mode, oid);

		for (i = 0; i < nparent; ++i) {
			/*
			 * tp[i] is valid, if present and if tp[i]==tp[imin] -
			 * otherwise, we should ignore it.
			 */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			const struct object_id *oid_i;
			unsigned mode_i;

			p->parent[i].status =
				!t ? DIFF_STATUS_DELETED :
					tpi_valid ?
						DIFF_STATUS_MODIFIED :
						DIFF_STATUS_ADDED;

			if (tpi_valid) {
				oid_i = &tp[i].entry.oid;
				mode_i = tp[i].entry.mode;
			}
			else {
				oid_i = null_oid();
				mode_i = 0;
			}

			p->parent[i].mode = mode_i;
			oidcpy(&p->parent[i].oid, oid_i);
		}

		keep = 1;
		if (opt->pathchange)
			keep = opt->pathchange(opt, p);

		/*
		 * If a path was filtered or consumed, we don't need to add it
		 * to the list and can reuse its memory, leaving it as a
		 * pre-allocated element on the tail.
		 *
		 * On the other hand, if the path needs to be kept, we need to
		 * reset its .next to NULL, as it was pre-initialized to how
		 * much memory was allocated.
		 *
		 * See path_appendnew() for details.
		 */
		if (!keep)
			p = pprev;
		else
			p->next = NULL;
	}

	if (recurse) {
		const struct object_id **parents_oid;

		FAST_ARRAY_ALLOC(parents_oid, nparent);
		for (i = 0; i < nparent; ++i) {
			/* same rule as in emitthis */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			parents_oid[i] = tpi_valid ? &tp[i].entry.oid : NULL;
		}

		strbuf_add(base, path, pathlen);
		strbuf_addch(base, '/');
		p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt,
				       depth + 1);
		FAST_ARRAY_FREE(parents_oid, nparent);
	}

	strbuf_setlen(base, old_baselen);
	return p;
}

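/*
 * Advance 't' past entries that do not match opt->pathspec.  We stop at the
 * first interesting entry; if tree_entry_interesting() reports that nothing
 * later in this tree can match either, the descriptor is drained
 * (t->size = 0) so the caller sees it as exhausted.
 */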
static void skip_uninteresting(struct tree_desc *t, struct strbuf *base,
			       struct diff_options *opt)
{
	enum interesting match;

	while (t->size) {
		match = tree_entry_interesting(opt->repo->index, &t->entry,
					       base, &opt->pathspec);
		if (match) {
			if (match == all_entries_not_interesting)
				t->size = 0;
			break;
		}
		update_tree_entry(t);
	}
}

/*
 * Generate paths for combined diff D(sha1,parents_oid[]).
 *
 * Resulting paths are appended to the combine_diff_path linked list, and also
 * emitted on the go via the opt->pathchange() callback, so it is possible to
 * process the result as a batch or incrementally.
 *
 * The paths are generated by scanning the new tree and all parent trees
 * simultaneously, similarly to what diff_tree() was doing for 2 trees.
 * The theory behind such a scan is as follows:
 *
 *
 * D(T,P1...Pn) calculation scheme
 * -------------------------------
 *
 * D(T,P1...Pn) = D(T,P1) ^ ... ^ D(T,Pn)	(regarding resulting paths set)
 *
 *	D(T,Pj)		- diff between T..Pj
 *	D(T,P1...Pn)	- combined diff from T to parents P1,...,Pn
 *
 *
 * We start from all trees, which are sorted, and compare their entries in
 * lock-step:
 *
 *	 T     P1       Pn
 *	 -     -        -
 *	|t|   |p1|     |pn|
 *	|-|   |--| ... |--|	imin = argmin(p1...pn)
 *	| |   |  |     |  |
 *	|-|   |--|     |--|
 *	|.|   |. |     |. |
 *	 .     .        .
 *	 .     .        .
 *
 * at any time there could be 3 cases:
 *
 *	1)  t < p[imin];
 *	2)  t > p[imin];
 *	3)  t = p[imin].
 *
 * Schematic deduction of what every case means, and what to do, follows:
 *
 * 1)  t < p[imin]  ->  ∀j t ∉ Pj  ->  "+t" ∈ D(T,Pj)  ->  D += "+t";  t↓
 *
 * 2)  t > p[imin]
 *
 *     2.1) ∃j: pj > p[imin]  ->  "-p[imin]" ∉ D(T,Pj)  ->  D += ø;  ∀ pi=p[imin]  pi↓
 *     2.2) ∀i  pi = p[imin]  ->  pi ∉ T  ->  "-pi" ∈ D(T,Pi)  ->  D += "-p[imin]";  ∀i pi↓
 *
 * 3)  t = p[imin]
 *
 *     3.1) ∃j: pj > p[imin]  ->  "+t" ∈ D(T,Pj)  ->  only pi=p[imin] remains to investigate
 *     3.2) pi = p[imin]  ->  investigate δ(t,pi)
 *
 *     3.1+3.2) looking at δ(t,pi) ∀i: pi=p[imin] - if all != ø  ->
 *
 *                       ⎧δ(t,pi)  - if pi=p[imin]
 *              ->  D += ⎨
 *                       ⎩"+t"     - if pi>p[imin]
 *
 *     in any case  t↓  ∀ pi=p[imin]  pi↓
 *
 *
 * ~~~~~~~~
 * NOTE
 *
 *	Usual diff D(A,B) is by definition the same as combined diff D(A,[B]),
 *	so this diff paths generator can be, and is, used for plain diff
 *	generation too.
 *
 *	Please pay attention to the common D(A,[B]) case when working on the
 *	code, in order not to slow it down.
 *
 * NOTE
 *	nparent must be > 0.
 */

/* ∀ pi=p[imin]  pi↓ */
static inline void update_tp_entries(struct tree_desc *tp, int nparent)
{
	int i;
	for (i = 0; i < nparent; ++i)
		if (!(tp[i].entry.mode & S_IFXMIN_NEQ))
			update_tree_entry(&tp[i]);
}

static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt,
	int depth)
{
	struct tree_desc t, *tp;
	void *ttree, **tptree;
	int i;

	if (depth > max_allowed_tree_depth)
		die("exceeded maximum allowed tree depth");

	FAST_ARRAY_ALLOC(tp, nparent);
	FAST_ARRAY_ALLOC(tptree, nparent);

	/*
	 * load parents first, as they are probably already cached.
	 *
	 * ( log_tree_diff() parses commit->parent before calling here via
	 *   diff_tree_oid(parent, commit) )
	 */
	for (i = 0; i < nparent; ++i)
		tptree[i] = fill_tree_descriptor(opt->repo, &tp[i], parents_oid[i]);
	ttree = fill_tree_descriptor(opt->repo, &t, oid);

	/* Enable recursion indefinitely */
	opt->pathspec.recursive = opt->flags.recursive;

	for (;;) {
		int imin, cmp;

		if (diff_can_quit_early(opt))
			break;

		if (opt->max_changes && diff_queued_diff.nr > opt->max_changes)
			break;

		if (opt->pathspec.nr) {
			skip_uninteresting(&t, base, opt);
			for (i = 0; i < nparent; i++)
				skip_uninteresting(&tp[i], base, opt);
		}

		/* comparing is finished when all trees are done */
		if (!t.size) {
			int done = 1;
			for (i = 0; i < nparent; ++i)
				if (tp[i].size) {
					done = 0;
					break;
				}
			if (done)
				break;
		}

		/*
		 * lookup imin = argmin(p1...pn),
		 * mark entries whether they =p[imin] along the way
		 */
		imin = 0;
		tp[0].entry.mode &= ~S_IFXMIN_NEQ;

		for (i = 1; i < nparent; ++i) {
			cmp = tree_entry_pathcmp(&tp[i], &tp[imin]);
			if (cmp < 0) {
				imin = i;
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else if (cmp == 0) {
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else {
				tp[i].entry.mode |= S_IFXMIN_NEQ;
			}
		}

		/* fixup markings for entries before imin */
		for (i = 0; i < imin; ++i)
			tp[i].entry.mode |= S_IFXMIN_NEQ;	/* pi > p[imin] */

		/* compare t vs p[imin] */
		cmp = tree_entry_pathcmp(&t, &tp[imin]);

		/* t = p[imin] */
		if (cmp == 0) {
			/* are either pi > p[imin] or diff(t,pi) != ø ? */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i) {
					/* p[i] > p[imin] */
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						continue;

					/* diff(t,pi) != ø */
					if (!oideq(&t.entry.oid, &tp[i].entry.oid) ||
					    (t.entry.mode != tp[i].entry.mode))
						continue;

					goto skip_emit_t_tp;
				}
			}

			/* D += {δ(t,pi) if pi=p[imin];  "+a" if pi > p[imin]} */
			p = emit_path(p, base, opt, nparent,
				      &t, tp, imin, depth);

		skip_emit_t_tp:
			/* t↓,  ∀ pi=p[imin]  pi↓ */
			update_tree_entry(&t);
			update_tp_entries(tp, nparent);
		}

		/* t < p[imin] */
		else if (cmp < 0) {
			/* D += "+t" */
			p = emit_path(p, base, opt, nparent,
				      &t, /*tp=*/NULL, -1, depth);

			/* t↓ */
			update_tree_entry(&t);
		}

		/* t > p[imin] */
		else {
			/* ∀i pi=p[imin]  ->  D += "-p[imin]" */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i)
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						goto skip_emit_tp;
			}

			p = emit_path(p, base, opt, nparent,
				      /*t=*/NULL, tp, imin, depth);

		skip_emit_tp:
			/* ∀ pi=p[imin]  pi↓ */
			update_tp_entries(tp, nparent);
		}
	}

	free(ttree);
	for (i = nparent-1; i >= 0; i--)
		free(tptree[i]);
	FAST_ARRAY_FREE(tptree, nparent);
	FAST_ARRAY_FREE(tp, nparent);

	return p;
}

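/*
 * Generate combined-diff paths for oid vs parents_oid[] and append them to
 * the list whose tail is 'p'.  Callers typically pass a dummy head element
 * with .next == NULL and pick the result up from its .next afterwards; see
 * ll_diff_tree_oid() below for that pattern.
 */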
struct combine_diff_path *diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt)
{
	p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt, 0);

	/*
	 * free pre-allocated last element, if any
	 * (see path_appendnew() for details about why)
	 */
	FREE_AND_NULL(p->next);

	return p;
}

/*
 * Does it look like the resulting diff might be due to a rename?
 * - single entry
 * - not a valid previous file
 */
static inline int diff_might_be_rename(void)
{
	return diff_queued_diff.nr == 1 &&
		!DIFF_FILE_VALID(diff_queued_diff.queue[0]->one);
}

static void try_to_follow_renames(const struct object_id *old_oid,
				  const struct object_id *new_oid,
				  struct strbuf *base, struct diff_options *opt)
{
	struct diff_options diff_opts;
	struct diff_queue_struct *q = &diff_queued_diff;
	struct diff_filepair *choice;
	int i;

	/*
	 * follow-rename code is very specific, we need exactly one
	 * path. Magic that matches more than one path is not
	 * supported.
	 */
	GUARD_PATHSPEC(&opt->pathspec, PATHSPEC_FROMTOP | PATHSPEC_LITERAL);
#if 0
	/*
	 * We should reject wildcards as well. Unfortunately we
	 * haven't got a reliable way to detect that 'foo\*bar' in
	 * fact has no wildcards. nowildcard_len is merely a hint for
	 * optimization. Let it slip for now until wildmatch is taught
	 * about dry-run mode and returns wildcard info.
	 */
	if (opt->pathspec.has_wildcard)
		BUG("wildcards are not supported");
#endif

	/* Remove the file creation entry from the diff queue, and remember it */
	choice = q->queue[0];
	q->nr = 0;

	repo_diff_setup(opt->repo, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.flags.find_copies_harder = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = opt->pathspec.items[0].match;
	diff_opts.break_opt = opt->break_opt;
	diff_opts.rename_score = opt->rename_score;
	diff_setup_done(&diff_opts);
	ll_diff_tree_oid(old_oid, new_oid, base, &diff_opts);
	diffcore_std(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);

	/* Go through the new set of file pairs, and see if we find a more interesting one */
	opt->found_follow = 0;
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];

		/*
		 * Found a source? Not only do we use that for the new
		 * diff_queued_diff, we will also use that as the path in
		 * the future!
		 */
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, opt->pathspec.items[0].match)) {
			const char *path[2];

			/* Switch the file-pairs around */
			q->queue[i] = choice;
			choice = p;

			/* Update the path we use from now on.. */
			path[0] = p->one->path;
			path[1] = NULL;
			clear_pathspec(&opt->pathspec);
			parse_pathspec(&opt->pathspec,
				       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
				       PATHSPEC_LITERAL_PATH, "", path);

			/*
			 * The caller expects us to return a set of vanilla
			 * filepairs and to let the later call it makes to
			 * diffcore_std() sort the renames out (among other
			 * things), but we have already found the renames
			 * ourselves; signal diffcore_std() not to muck with
			 * rename information.
			 */
			opt->found_follow = 1;
			break;
		}
	}

	/*
	 * Then, discard all the non-relevant file pairs...
	 */
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		diff_free_filepair(p);
	}

	/*
	 * .. and re-instate the one we want (which might be either the
	 * original one, or the rename/copy we found)
	 */
	q->queue[0] = choice;
	q->nr = 1;
}

static void ll_diff_tree_oid(const struct object_id *old_oid,
			     const struct object_id *new_oid,
			     struct strbuf *base, struct diff_options *opt)
{
	struct combine_diff_path phead, *p;
	pathchange_fn_t pathchange_old = opt->pathchange;

	phead.next = NULL;
	opt->pathchange = emit_diff_first_parent_only;
	diff_tree_paths(&phead, new_oid, &old_oid, 1, base, opt);

	for (p = phead.next; p;) {
		struct combine_diff_path *pprev = p;
		p = p->next;
		free(pprev);
	}

	opt->pathchange = pathchange_old;
}

void diff_tree_oid(const struct object_id *old_oid,
		   const struct object_id *new_oid,
		   const char *base_str, struct diff_options *opt)
{
	struct strbuf base;

	strbuf_init(&base, PATH_MAX);
	strbuf_addstr(&base, base_str);

	ll_diff_tree_oid(old_oid, new_oid, &base, opt);
	if (!*base_str && opt->flags.follow_renames && diff_might_be_rename())
		try_to_follow_renames(old_oid, new_oid, &base, opt);

	strbuf_release(&base);
}
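
/*
 * Typical call sequence for diff_tree_oid() above (an illustrative sketch
 * only, not code from this file; old_tree_oid/new_tree_oid stand for
 * whichever tree object IDs the caller wants to compare):
 *
 *	struct diff_options opts;
 *
 *	repo_diff_setup(the_repository, &opts);
 *	opts.output_format = DIFF_FORMAT_PATCH;
 *	opts.flags.recursive = 1;
 *	diff_setup_done(&opts);
 *
 *	diff_tree_oid(&old_tree_oid, &new_tree_oid, "", &opts);
 *	diffcore_std(&opts);
 *	diff_flush(&opts);
 */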
void diff_root_tree_oid(const struct object_id *new_oid,
			const char *base,
			struct diff_options *opt)
{
	diff_tree_oid(NULL, new_oid, base, opt);
}