t5332-multi-pack-reuse.sh: demonstrate duplicate packing failure
[git/gitster.git] / shallow.c
blob4bb1518dbc80747d2641a5a80291ab159270804c
1 #define USE_THE_REPOSITORY_VARIABLE
3 #include "git-compat-util.h"
4 #include "hex.h"
5 #include "repository.h"
6 #include "tempfile.h"
7 #include "lockfile.h"
8 #include "object-store-ll.h"
9 #include "commit.h"
10 #include "tag.h"
11 #include "pkt-line.h"
12 #include "refs.h"
13 #include "oid-array.h"
14 #include "path.h"
15 #include "diff.h"
16 #include "revision.h"
17 #include "commit-slab.h"
18 #include "list-objects.h"
19 #include "commit-reach.h"
20 #include "shallow.h"
21 #include "statinfo.h"
22 #include "trace.h"
/*
 * Point the repository at an alternate shallow file (or NULL to unset).
 * Must be called before is_repository_shallow() has cached its answer,
 * since that cache would otherwise reflect the wrong file.
 */
void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
	if (r->parsed_objects->is_shallow != -1)
		BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
	if (r->parsed_objects->alternate_shallow_file && !override)
		return; /* keep the previously set path unless overriding */
	free(r->parsed_objects->alternate_shallow_file);
	r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}
/*
 * Record OID as a shallow boundary commit by installing a parent-less
 * graft (nr_parent == -1) for it. If the commit object is already
 * parsed, drop its in-core parent list so it agrees with the graft.
 * Returns the result of register_commit_graft().
 */
int register_shallow(struct repository *r, const struct object_id *oid)
{
	struct commit_graft *graft =
		xmalloc(sizeof(struct commit_graft));
	struct commit *commit = lookup_commit(r, oid);

	oidcpy(&graft->oid, oid);
	graft->nr_parent = -1; /* -1 marks a shallow (cut-parents) graft */
	if (commit && commit->object.parsed) {
		free_commit_list(commit->parents);
		commit->parents = NULL;
	}
	return register_commit_graft(r, graft, 0);
}
/*
 * Remove the graft for OID from the_repository's graft array.
 * Returns 0 on success, -1 if no graft for OID exists.
 */
int unregister_shallow(const struct object_id *oid)
{
	int pos = commit_graft_pos(the_repository, oid);

	if (pos < 0)
		return -1;
	free(the_repository->parsed_objects->grafts[pos]);
	/* close the gap left by the removed entry */
	if (pos + 1 < the_repository->parsed_objects->grafts_nr)
		MOVE_ARRAY(the_repository->parsed_objects->grafts + pos,
			   the_repository->parsed_objects->grafts + pos + 1,
			   the_repository->parsed_objects->grafts_nr - pos - 1);
	the_repository->parsed_objects->grafts_nr--;
	return 0;
}
/*
 * Lazily determine whether the repository is shallow by reading its
 * shallow file (or the configured alternate). The answer is cached in
 * r->parsed_objects->is_shallow, and each line of the file is
 * registered as a shallow graft. Returns 1 if shallow, 0 if not.
 */
int is_repository_shallow(struct repository *r)
{
	FILE *fp;
	char buf[1024];
	const char *path = r->parsed_objects->alternate_shallow_file;

	if (r->parsed_objects->is_shallow >= 0)
		return r->parsed_objects->is_shallow; /* cached answer */

	if (!path)
		path = git_path_shallow(r);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(r->parsed_objects->shallow_stat);
		r->parsed_objects->is_shallow = 0;
		return r->parsed_objects->is_shallow;
	}
	/* remember stat data so later file changes can be detected */
	stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
	r->parsed_objects->is_shallow = 1;

	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(r, &oid);
	}
	fclose(fp);
	return r->parsed_objects->is_shallow;
}
/* Forget all cached shallow state so it can be re-read from disk. */
static void reset_repository_shallow(struct repository *r)
{
	r->parsed_objects->is_shallow = -1; /* -1 == "not yet determined" */
	stat_validity_clear(r->parsed_objects->shallow_stat);
	parsed_object_pool_reset_commit_grafts(r->parsed_objects);
}
/*
 * Commit the locked shallow file to disk and refresh the in-memory
 * shallow state from the new contents. Returns the result of
 * commit_lock_file().
 */
int commit_shallow_file(struct repository *r, struct shallow_lock *lk)
{
	int res = commit_lock_file(&lk->lock);
	reset_repository_shallow(r);

	/*
	 * Update in-memory data structures with the new shallow information,
	 * including unparsing all commits that now have grafts.
	 */
	is_repository_shallow(r);

	return res;
}
/* Discard pending shallow-file changes and re-read the on-disk state. */
void rollback_shallow_file(struct repository *r, struct shallow_lock *lk)
{
	rollback_lock_file(&lk->lock);
	reset_repository_shallow(r);
}
/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);

/* deep_clear_commit_depth() callback: release one slab entry. */
static void free_depth_in_slab(int **ptr)
{
	FREE_AND_NULL(*ptr);
}
/*
 * Walk history from HEADS and return the commits that form the shallow
 * boundary: commits whose depth reaches DEPTH, or commits that are
 * already shallow grafts (nr_parent < 0). Boundary commits get
 * SHALLOW_FLAG; commits walked past get NOT_SHALLOW_FLAG. Per-commit
 * depth is kept in a commit slab so a commit reached along several
 * paths keeps its minimum depth.
 */
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
					int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;
	struct commit_depth depths;

	init_commit_depth(&depths);
	/* keep going while there is a current commit, unvisited heads, or stacked work */
	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				int **depth_slot;
				/* start a new walk from the next head, peeling tags */
				commit = (struct commit *)
					deref_tag(the_repository,
						  heads->objects[i++].item,
						  NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				depth_slot = commit_depth_at(&depths, commit);
				if (!*depth_slot)
					*depth_slot = xmalloc(sizeof(int));
				**depth_slot = 0;
				cur_depth = 0;
			} else {
				/* resume a previously stacked parent */
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		/* boundary: depth limit hit, or an existing shallow graft */
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			int **depth_slot = commit_depth_at(&depths, p->item);

			if (!*depth_slot) {
				*depth_slot = xmalloc(sizeof(int));
				**depth_slot = cur_depth;
			} else {
				/* only re-walk a parent if we found a shorter path */
				if (cur_depth >= **depth_slot)
					continue;
				**depth_slot = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				/* continue directly with the last parent */
				commit = p->item;
				cur_depth = **commit_depth_at(&depths, commit);
			}
		}
	}
	deep_clear_commit_depth(&depths, free_depth_in_slab);

	return result;
}
/* traverse_commit_list() callback: prepend commit to the list at *data. */
static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}
/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * are also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0; /* no need to keep commit buffers around */
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	if (!not_shallow_list)
		die("no commits selected for shallow requests");

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (repo_parse_commit(the_repository, c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		/* a commit with any parent outside the walk is a border commit */
		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	release_revisions(&revs);
	return result;
}
/*
 * Die if the on-disk shallow file changed since it was read, detected
 * via the stat data saved by is_repository_shallow().
 */
static void check_shallow_file_for_update(struct repository *r)
{
	if (r->parsed_objects->is_shallow == -1)
		BUG("shallow must be initialized by now");

	if (!stat_validity_check(r->parsed_objects->shallow_stat,
				 git_path_shallow(r)))
		die("shallow file has changed since we read it");
}
/* Flags for write_shallow_commits_1() / write_one_shallow(). */
#define SEEN_ONLY 1	/* only emit commits carrying the SEEN flag */
#define VERBOSE 2	/* report skipped entries on stdout */
#define QUICK 4		/* only check that the object still exists */

/* Accumulator passed to the write_one_shallow() graft callback. */
struct write_shallow_data {
	struct strbuf *out;	/* destination buffer */
	int use_pack_protocol;	/* pkt-line framing vs. plain lines */
	int count;		/* number of entries written so far */
	unsigned flags;		/* SEEN_ONLY / VERBOSE / QUICK */
};
305 static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
307 struct write_shallow_data *data = cb_data;
308 const char *hex = oid_to_hex(&graft->oid);
309 if (graft->nr_parent != -1)
310 return 0;
311 if (data->flags & QUICK) {
312 if (!repo_has_object_file(the_repository, &graft->oid))
313 return 0;
314 } else if (data->flags & SEEN_ONLY) {
315 struct commit *c = lookup_commit(the_repository, &graft->oid);
316 if (!c || !(c->object.flags & SEEN)) {
317 if (data->flags & VERBOSE)
318 printf("Removing %s from .git/shallow\n",
319 oid_to_hex(&c->object.oid));
320 return 0;
323 data->count++;
324 if (data->use_pack_protocol)
325 packet_buf_write(data->out, "shallow %s", hex);
326 else {
327 strbuf_addstr(data->out, hex);
328 strbuf_addch(data->out, '\n');
330 return 0;
333 static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
334 const struct oid_array *extra,
335 unsigned flags)
337 struct write_shallow_data data;
338 int i;
339 data.out = out;
340 data.use_pack_protocol = use_pack_protocol;
341 data.count = 0;
342 data.flags = flags;
343 for_each_commit_graft(write_one_shallow, &data);
344 if (!extra)
345 return data.count;
346 for (i = 0; i < extra->nr; i++) {
347 strbuf_addstr(out, oid_to_hex(extra->oid + i));
348 strbuf_addch(out, '\n');
349 data.count++;
351 return data.count;
/* Convenience wrapper: write shallow commits with no filtering flags. */
int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}
/*
 * Write the current shallow list (plus EXTRA, if any) to a temporary
 * file and return its path. Returns "" when there is nothing to
 * write; is_repository_shallow() treats that as "no shallow file".
 */
const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		temp = xmks_tempfile(git_path("shallow_XXXXXX"));

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}
/*
 * Take the shallow-file lock, verify the file did not change under us,
 * write the current shallow list (plus EXTRA) to the lockfile, and
 * point *alternate_shallow_file at it ("" when there is nothing to
 * write). The lock is left held; the caller commits or rolls it back.
 */
void setup_alternate_shallow(struct shallow_lock *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(&shallow_lock->lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock->lock));
		*alternate_shallow_file = get_lock_file_path(&shallow_lock->lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}
/*
 * for_each_commit_graft() callback: send one "shallow" pkt-line for a
 * shallow graft to the file descriptor pointed to by cb.
 */
static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
	int fd = *(int *)cb;
	if (graft->nr_parent == -1)
		packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
	return 0;
}
/* Send a "shallow" pkt-line for every shallow graft to FD. */
void advertise_shallow_grafts(int fd)
{
	if (!is_repository_shallow(the_repository))
		return;
	for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}
/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN", except when quick_prune is non-zero,
 * in which case lines are excised from the shallow file if they refer to
 * commits that do not exist (any longer).
 */
void prune_shallow(unsigned options)
{
	struct shallow_lock shallow_lock = SHALLOW_LOCK_INIT;
	struct strbuf sb = STRBUF_INIT;
	unsigned flags = SEEN_ONLY;
	int fd;

	if (options & PRUNE_QUICK)
		flags |= QUICK;

	if (options & PRUNE_SHOW_ONLY) {
		/* dry run: report what would be pruned, touch nothing */
		flags |= VERBOSE;
		write_shallow_commits_1(&sb, 0, NULL, flags);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock.lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, flags)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock.lock));
		commit_shallow_file(the_repository, &shallow_lock);
	} else {
		/* nothing survived the prune: drop the shallow file entirely */
		unlink(git_path_shallow(the_repository));
		rollback_shallow_file(the_repository, &shallow_lock);
	}
	strbuf_release(&sb);
}
/* Trace key used by the shallow machinery's trace_printf_key() calls. */
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);
/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	int i;

	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (i = 0; i < sa->nr; i++) {
		if (repo_has_object_file(the_repository, sa->oid + i)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(the_repository,
						    &sa->oid[i]);
			/* already a shallow graft of ours: skip entirely */
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}
/* Release all memory owned by INFO (but not INFO->shallow itself). */
void clear_shallow_info(struct shallow_info *info)
{
	if (info->used_shallow) {
		for (size_t i = 0; i < info->shallow->nr; i++)
			free(info->used_shallow[i]);
		free(info->used_shallow);
	}

	free(info->need_reachability_test);
	free(info->reachable);
	free(info->shallow_ref);
	free(info->ours);
	free(info->theirs);
}
/* Step 4, remove non-existent ones in "theirs" after getting the pack */
void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	int i, dst;

	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	/* compact in place, keeping only entries whose object exists */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (repo_has_object_file(the_repository, oid + info->theirs[i]))
			dst++;
	}
	info->nr_theirs = dst;
}
/* Per-commit bitmap: bit n set means ref n reaches the commit. */
define_commit_slab(ref_bitmap, uint32_t *);

/* Size of each allocation pool handed out by paint_alloc(). */
#define POOL_SIZE (512 * 1024)

struct paint_info {
	struct ref_bitmap ref_bitmap;	/* per-commit reachability bitmaps */
	unsigned nr_bits;		/* number of refs == bits per bitmap */
	char **pools;			/* bump-allocated pools, POOL_SIZE each */
	char *free, *end;		/* current pool's cursor and limit */
	unsigned pool_count;
};
532 static uint32_t *paint_alloc(struct paint_info *info)
534 unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
535 unsigned size = nr * sizeof(uint32_t);
536 void *p;
537 if (!info->pool_count || size > info->end - info->free) {
538 if (size > POOL_SIZE)
539 BUG("pool size too small for %d in paint_alloc()",
540 size);
541 info->pool_count++;
542 REALLOC_ARRAY(info->pools, info->pool_count);
543 info->free = xmalloc(POOL_SIZE);
544 info->pools[info->pool_count - 1] = info->free;
545 info->end = info->free + POOL_SIZE;
547 p = info->free;
548 info->free += size;
549 return p;
/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(the_repository, oid,
							  1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	/* bitmap holding only the id-th bit, shared by all fresh commits */
	tmp = xmalloc(bitmap_size);
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (!*refs)
			*refs = bitmap;
		else {
			/* OR our bit into the commit's existing bitmap; only
			 * allocate a new one if the merge actually changed it */
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (repo_parse_commit(the_repository, c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	/* clear the SEEN flags set during this walk */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}
/*
 * for_each_ref() callback: mark the commit a ref points at, and its
 * ancestry, UNINTERESTING so paint_down() stops there.
 */
static int mark_uninteresting(const char *refname UNUSED,
			      const char *referent UNUSED,
			      const struct object_id *oid,
			      int flags UNUSED,
			      void *cb_data UNUSED)
{
	struct commit *commit = lookup_commit_reference_gently(the_repository,
							       oid, 1);
	if (!commit)
		return 0;
	commit->object.flags |= UNINTERESTING;
	mark_parents_uninteresting(NULL, commit);
	return 0;
}
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);

/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	/* collect indices of all candidate shallow commits, ours then theirs */
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	refs_head_ref(get_main_ref_store(the_repository), mark_uninteresting,
		      NULL);
	refs_for_each_ref(get_main_ref_store(the_repository),
			  mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(the_repository,
						 &oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(the_repository,
							       &oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	/* release the bitmap slab and every paint_alloc() pool */
	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}
/* Growable array of commits, filled by the add_ref() callback. */
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};
/*
 * for_each_ref() callback: append the commit a ref (peelably) points
 * at to the commit_array in cb_data; non-commit refs are skipped.
 */
static int add_ref(const char *refname UNUSED,
		   const char *referent UNUSED,
		   const struct object_id *oid,
		   int flags UNUSED,
		   void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
							     oid, 1);
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}
/*
 * Increment ref_status[i] for every bit i set in BITMAP (which must
 * hold at least NR bits). A NULL ref_status is a no-op.
 */
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	/*
	 * Use a signed index matching "nr": with the previous
	 * "unsigned int i", a negative nr would be converted to a huge
	 * unsigned bound and the loop would overrun both arrays.
	 */
	int i;

	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}
/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(the_repository, &oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		/* keep the entry if any ref's bit is set for it */
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	/* gather the commits at all ref tips for the merge-base test */
	memset(&ca, 0, sizeof(ca));
	refs_head_ref(get_main_ref_store(the_repository), add_ref, &ca);
	refs_for_each_ref(get_main_ref_store(the_repository), add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(the_repository, &oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				/* Step 7, reachability test at commit level */
				int ret = repo_in_merge_bases_many(the_repository, c, ca.nr, ca.commits, 1);
				if (ret < 0)
					exit(128);
				if (!ret) {
					update_refstatus(ref_status, info->ref->nr, *bitmap);
					dst++;
					break;
				}
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}
/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(the_repository,
						      &si->shallow->oid[c]);

		if (!si->commits) {
			/* lazily collect all ref-tip commits, once */
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			refs_head_ref(get_main_ref_store(the_repository),
				      add_ref, &ca);
			refs_for_each_ref(get_main_ref_store(the_repository),
					  add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = repo_in_merge_bases_many(the_repository,
							    commit,
							    si->nr_commits,
							    si->commits,
							    1);
		if (si->reachable[c] < 0)
			exit(128);
		si->need_reachability_test[c] = 0; /* cache the answer */
	}
	return si->reachable[c];
}