#define USE_THE_REPOSITORY_VARIABLE

#include "git-compat-util.h"
#include "object-file.h"
#include "hash-lookup.h"
#include "run-command.h"
#include "chunk-format.h"
#include "pack-bitmap.h"
#include "list-objects.h"

#define PACK_EXPIRED UINT_MAX
#define BITMAP_POS_UNKNOWN (~((uint32_t)0))
#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
extern int midx_checksum_valid(struct multi_pack_index *m);
extern void clear_midx_files_ext(const char *object_dir, const char *ext,
				 unsigned char *keep_hash);
extern int cmp_idx_or_pack_name(const char *idx_or_pack_name,
				const char *idx_name);
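/*
 * Write the fixed-size MIDX header: signature, format version, hash
 * version, the number of chunks, one unused byte, and the pack count.
 */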
static size_t write_midx_header(struct hashfile *f,
				unsigned char num_chunks,
				uint32_t num_packs)
{
	hashwrite_be32(f, MIDX_SIGNATURE);
	hashwrite_u8(f, MIDX_VERSION);
	hashwrite_u8(f, oid_version(the_hash_algo));
	hashwrite_u8(f, num_chunks);
	hashwrite_u8(f, 0); /* unused */
	hashwrite_be32(f, num_packs);

	return MIDX_HEADER_SIZE;
}
struct pack_info {
	uint32_t orig_pack_int_id;
	char *pack_name;
	struct packed_git *p;

	uint32_t bitmap_pos;
	uint32_t bitmap_nr;

	unsigned expired : 1;
};
static void fill_pack_info(struct pack_info *info,
			   struct packed_git *p, const char *pack_name,
			   uint32_t orig_pack_int_id)
{
	memset(info, 0, sizeof(struct pack_info));

	info->orig_pack_int_id = orig_pack_int_id;
	info->pack_name = xstrdup(pack_name);
	info->p = p;
	info->bitmap_pos = BITMAP_POS_UNKNOWN;
}
static int pack_info_compare(const void *_a, const void *_b)
{
	struct pack_info *a = (struct pack_info *)_a;
	struct pack_info *b = (struct pack_info *)_b;
	return strcmp(a->pack_name, b->pack_name);
}
static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
{
	const char *pack_name = _va;
	const struct pack_info *compar = _vb;

	return cmp_idx_or_pack_name(pack_name, compar->pack_name);
}
struct write_midx_context {
	struct pack_info *info;
	size_t nr;
	size_t alloc;
	struct multi_pack_index *m;
	struct progress *progress;
	unsigned pack_paths_checked;

	struct pack_midx_entry *entries;
	size_t entries_nr;

	uint32_t *pack_perm;
	uint32_t *pack_order;
	unsigned large_offsets_needed:1;
	uint32_t num_large_offsets;

	int preferred_pack_idx;

	struct string_list *to_include;
};
static int should_include_pack(const struct write_midx_context *ctx,
			       const char *file_name)
{
	/*
	 * Note that at most one of ctx->m and ctx->to_include is set,
	 * so we are testing midx_contains_pack() and
	 * string_list_has_string() independently (guarded by the
	 * appropriate NULL checks).
	 *
	 * We could support passing to_include while reusing an existing
	 * MIDX, but don't currently since the reuse process drags
	 * forward all packs from an existing MIDX (without checking
	 * whether or not they appear in the to_include list).
	 *
	 * If we added support for that, these next two conditionals
	 * should be performed independently (likely checking
	 * to_include before the existing MIDX).
	 */
	if (ctx->m && midx_contains_pack(ctx->m, file_name))
		return 0;
	else if (ctx->to_include &&
		 !string_list_has_string(ctx->to_include, file_name))
		return 0;

	return 1;
}
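/*
 * Callback for for_each_file_in_pack_dir(): open each ".idx" file we
 * are interested in and record its pack in ctx->info.
 */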
static void add_pack_to_midx(const char *full_path, size_t full_path_len,
			     const char *file_name, void *data)
{
	struct write_midx_context *ctx = data;
	struct packed_git *p;

	if (ends_with(file_name, ".idx")) {
		display_progress(ctx->progress, ++ctx->pack_paths_checked);

		if (!should_include_pack(ctx, file_name))
			return;

		ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
		p = add_packed_git(full_path, full_path_len, 0);
		if (!p) {
			warning(_("failed to add packfile '%s'"),
				full_path);
			return;
		}

		if (open_pack_index(p)) {
			warning(_("failed to open pack-index '%s'"),
				full_path);
			close_pack(p);
			free(p);
			return;
		}

		fill_pack_info(&ctx->info[ctx->nr], p, file_name, ctx->nr);
		ctx->nr++;
	}
}
struct pack_midx_entry {
	struct object_id oid;
	uint32_t pack_int_id;
	time_t pack_mtime;
	uint64_t offset;

	unsigned preferred : 1;
};
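/*
 * Comparator for pack_midx_entry: order by OID, then prefer entries
 * from the preferred pack, then newer packs, then lower pack id.
 */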
static int midx_oid_compare(const void *_a, const void *_b)
{
	const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
	const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
	int cmp = oidcmp(&a->oid, &b->oid);

	if (cmp)
		return cmp;

	/* Sort objects in a preferred pack first when multiple copies exist. */
	if (a->preferred > b->preferred)
		return -1;
	if (a->preferred < b->preferred)
		return 1;

	if (a->pack_mtime > b->pack_mtime)
		return -1;
	else if (a->pack_mtime < b->pack_mtime)
		return 1;

	return a->pack_int_id - b->pack_int_id;
}
static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
				      struct pack_midx_entry *e,
				      uint32_t pos)
{
	if (pos >= m->num_objects)
		return 1;

	nth_midxed_object_oid(&e->oid, m, pos);
	e->pack_int_id = nth_midxed_pack_int_id(m, pos);
	e->offset = nth_midxed_offset(m, pos);

	/* consider objects in midx to be from "old" packs */
	e->pack_mtime = 0;
	return 0;
}
static void fill_pack_entry(uint32_t pack_int_id,
			    struct packed_git *p,
			    uint32_t cur_object,
			    struct pack_midx_entry *entry,
			    int preferred)
{
	if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
		die(_("failed to locate object %d in packfile"), cur_object);

	entry->pack_int_id = pack_int_id;
	entry->pack_mtime = p->mtime;

	entry->offset = nth_packed_object_offset(p, cur_object);
	entry->preferred = !!preferred;
}
struct midx_fanout {
	struct pack_midx_entry *entries;
	size_t nr, alloc;
};
static void midx_fanout_grow(struct midx_fanout *fanout, size_t nr)
{
	if (nr < fanout->nr)
		BUG("negative growth in midx_fanout_grow() (%"PRIuMAX" < %"PRIuMAX")",
		    (uintmax_t)nr, (uintmax_t)fanout->nr);
	ALLOC_GROW(fanout->entries, nr, fanout->alloc);
}
static void midx_fanout_sort(struct midx_fanout *fanout)
{
	QSORT(fanout->entries, fanout->nr, midx_oid_compare);
}
static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
					struct multi_pack_index *m,
					uint32_t cur_fanout,
					int preferred_pack)
{
	uint32_t start = 0, end;
	uint32_t cur_object;

	if (cur_fanout)
		start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
	end = ntohl(m->chunk_oid_fanout[cur_fanout]);

	for (cur_object = start; cur_object < end; cur_object++) {
		if ((preferred_pack > -1) &&
		    (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
			/*
			 * Objects from preferred packs are added
			 * separately.
			 */
			continue;
		}

		midx_fanout_grow(fanout, fanout->nr + 1);
		nth_midxed_pack_midx_entry(m,
					   &fanout->entries[fanout->nr],
					   cur_object);
		fanout->entries[fanout->nr].preferred = 0;
		fanout->nr++;
	}
}
static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
					struct pack_info *info,
					uint32_t cur_pack,
					int preferred,
					uint32_t cur_fanout)
{
	struct packed_git *pack = info[cur_pack].p;
	uint32_t start = 0, end;
	uint32_t cur_object;

	if (cur_fanout)
		start = get_pack_fanout(pack, cur_fanout - 1);
	end = get_pack_fanout(pack, cur_fanout);

	for (cur_object = start; cur_object < end; cur_object++) {
		midx_fanout_grow(fanout, fanout->nr + 1);
		fill_pack_entry(cur_pack,
				info[cur_pack].p,
				cur_object,
				&fanout->entries[fanout->nr],
				preferred);
		fanout->nr++;
	}
}
/*
 * It is possible to artificially get into a state where there are many
 * duplicate copies of objects. That can create high memory pressure if
 * we are to create a list of all objects before de-duplication. To reduce
 * this memory pressure without a significant performance drop, automatically
 * group objects by the first byte of their object id. Use the IDX fanout
 * tables to group the data, copy to a local array, then sort.
 *
 * Copy only the de-duplicated entries (selected by most-recent modified time
 * of a packfile containing the object).
 */
static void compute_sorted_entries(struct write_midx_context *ctx,
				   uint32_t start_pack)
{
	uint32_t cur_fanout, cur_pack, cur_object;
	size_t alloc_objects, total_objects = 0;
	struct midx_fanout fanout = { 0 };

	for (cur_pack = start_pack; cur_pack < ctx->nr; cur_pack++)
		total_objects = st_add(total_objects,
				       ctx->info[cur_pack].p->num_objects);

	/*
	 * As we de-duplicate by fanout value, we expect the fanout
	 * slices to be evenly distributed, with some noise. Hence,
	 * allocate slightly more than one 256th.
	 */
	alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16;

	ALLOC_ARRAY(fanout.entries, fanout.alloc);
	ALLOC_ARRAY(ctx->entries, alloc_objects);
	ctx->entries_nr = 0;

	for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
		fanout.nr = 0;

		if (ctx->m)
			midx_fanout_add_midx_fanout(&fanout, ctx->m, cur_fanout,
						    ctx->preferred_pack_idx);

		for (cur_pack = start_pack; cur_pack < ctx->nr; cur_pack++) {
			int preferred = cur_pack == ctx->preferred_pack_idx;
			midx_fanout_add_pack_fanout(&fanout,
						    ctx->info, cur_pack,
						    preferred, cur_fanout);
		}

		if (-1 < ctx->preferred_pack_idx && ctx->preferred_pack_idx < start_pack)
			midx_fanout_add_pack_fanout(&fanout, ctx->info,
						    ctx->preferred_pack_idx, 1,
						    cur_fanout);

		midx_fanout_sort(&fanout);

		/*
		 * The batch is now sorted by OID and then mtime (descending).
		 * Take only the first duplicate.
		 */
		for (cur_object = 0; cur_object < fanout.nr; cur_object++) {
			if (cur_object && oideq(&fanout.entries[cur_object - 1].oid,
						&fanout.entries[cur_object].oid))
				continue;

			ALLOC_GROW(ctx->entries, st_add(ctx->entries_nr, 1),
				   alloc_objects);
			memcpy(&ctx->entries[ctx->entries_nr],
			       &fanout.entries[cur_object],
			       sizeof(struct pack_midx_entry));
			ctx->entries_nr++;
		}
	}

	free(fanout.entries);
}
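/*
 * Chunk callback: write the NUL-terminated pack names of all
 * non-expired packs, padded out to the chunk alignment.
 */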
static int write_midx_pack_names(struct hashfile *f, void *data)
{
	struct write_midx_context *ctx = data;
	uint32_t i;
	unsigned char padding[MIDX_CHUNK_ALIGNMENT];
	size_t written = 0;

	for (i = 0; i < ctx->nr; i++) {
		size_t writelen;

		if (ctx->info[i].expired)
			continue;

		if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
			BUG("incorrect pack-file order: %s before %s",
			    ctx->info[i - 1].pack_name,
			    ctx->info[i].pack_name);

		writelen = strlen(ctx->info[i].pack_name) + 1;
		hashwrite(f, ctx->info[i].pack_name, writelen);
		written += writelen;
	}

	/* add padding to be aligned */
	i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
	if (i < MIDX_CHUNK_ALIGNMENT) {
		memset(padding, 0, sizeof(padding));
		hashwrite(f, padding, i);
	}

	return 0;
}
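/*
 * Chunk callback: for each surviving pack, write its first position
 * and its object count in the MIDX bitmap's pseudo pack order.
 */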
static int write_midx_bitmapped_packs(struct hashfile *f, void *data)
{
	struct write_midx_context *ctx = data;
	uint32_t i;

	for (i = 0; i < ctx->nr; i++) {
		struct pack_info *pack = &ctx->info[i];
		if (pack->expired)
			continue;

		if (pack->bitmap_pos == BITMAP_POS_UNKNOWN && pack->bitmap_nr)
			BUG("pack '%s' has no bitmap position, but has %d bitmapped object(s)",
			    pack->pack_name, pack->bitmap_nr);

		hashwrite_be32(f, pack->bitmap_pos);
		hashwrite_be32(f, pack->bitmap_nr);
	}

	return 0;
}
static int write_midx_oid_fanout(struct hashfile *f,
				 void *data)
{
	struct write_midx_context *ctx = data;
	struct pack_midx_entry *list = ctx->entries;
	struct pack_midx_entry *last = ctx->entries + ctx->entries_nr;
	uint32_t count = 0;
	uint32_t i;

	/*
	 * Write the first-level table (the list is sorted,
	 * but we use a 256-entry lookup to be able to avoid
	 * having to do eight extra binary search iterations).
	 */
	for (i = 0; i < 256; i++) {
		struct pack_midx_entry *next = list;

		while (next < last && next->oid.hash[0] == i) {
			count++;
			next++;
		}

		hashwrite_be32(f, count);
		list = next;
	}

	return 0;
}
static int write_midx_oid_lookup(struct hashfile *f,
				 void *data)
{
	struct write_midx_context *ctx = data;
	unsigned char hash_len = the_hash_algo->rawsz;
	struct pack_midx_entry *list = ctx->entries;
	uint32_t i;

	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *obj = list++;

		if (i < ctx->entries_nr - 1) {
			struct pack_midx_entry *next = list;
			if (oidcmp(&obj->oid, &next->oid) >= 0)
				BUG("OIDs not in order: %s >= %s",
				    oid_to_hex(&obj->oid),
				    oid_to_hex(&next->oid));
		}

		hashwrite(f, obj->oid.hash, (int)hash_len);
	}

	return 0;
}
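/*
 * Chunk callback: write a (pack id, offset) pair per object; offsets
 * that need more than 31 bits refer into the large-offset chunk instead.
 */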
static int write_midx_object_offsets(struct hashfile *f,
				     void *data)
{
	struct write_midx_context *ctx = data;
	struct pack_midx_entry *list = ctx->entries;
	uint32_t i, nr_large_offset = 0;

	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *obj = list++;

		if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
			BUG("object %s is in an expired pack with int-id %d",
			    oid_to_hex(&obj->oid),
			    obj->pack_int_id);

		hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);

		if (ctx->large_offsets_needed && obj->offset >> 31)
			hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
		else if (!ctx->large_offsets_needed && obj->offset >> 32)
			BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
			    oid_to_hex(&obj->oid),
			    obj->offset);
		else
			hashwrite_be32(f, (uint32_t)obj->offset);
	}

	return 0;
}
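/*
 * Chunk callback: write the full 64-bit offsets of all objects whose
 * offsets could not be stored directly in the object-offset chunk.
 */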
static int write_midx_large_offsets(struct hashfile *f,
				    void *data)
{
	struct write_midx_context *ctx = data;
	struct pack_midx_entry *list = ctx->entries;
	struct pack_midx_entry *end = ctx->entries + ctx->entries_nr;
	uint32_t nr_large_offset = ctx->num_large_offsets;

	while (nr_large_offset) {
		struct pack_midx_entry *obj;
		uint64_t offset;

		if (list >= end)
			BUG("too many large-offset objects");

		obj = list++;
		offset = obj->offset;

		if (!(offset >> 31))
			continue;

		hashwrite_be64(f, offset);

		nr_large_offset--;
	}

	return 0;
}
static int write_midx_revindex(struct hashfile *f,
			       void *data)
{
	struct write_midx_context *ctx = data;
	uint32_t i;

	for (i = 0; i < ctx->entries_nr; i++)
		hashwrite_be32(f, ctx->pack_order[i]);

	return 0;
}
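/*
 * Helper record for computing the pseudo pack order: one entry per
 * object, sorted by pack and then by offset within the pack.
 */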
struct midx_pack_order_data {
	uint32_t nr;
	uint32_t pack;
	off_t offset;
};
static int midx_pack_order_cmp(const void *va, const void *vb)
{
	const struct midx_pack_order_data *a = va, *b = vb;
	if (a->pack < b->pack)
		return -1;
	else if (a->pack > b->pack)
		return 1;
	else if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return 0;
}
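/*
 * Compute the pseudo pack order: a permutation of the lexicographic
 * object index that groups objects by pack and orders them by offset,
 * recording each pack's bitmap position along the way.
 */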
static uint32_t *midx_pack_order(struct write_midx_context *ctx)
{
	struct midx_pack_order_data *data;
	uint32_t *pack_order;
	uint32_t i;

	trace2_region_enter("midx", "midx_pack_order", the_repository);

	ALLOC_ARRAY(data, ctx->entries_nr);
	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *e = &ctx->entries[i];
		data[i].nr = i;
		data[i].pack = ctx->pack_perm[e->pack_int_id];
		if (!e->preferred)
			data[i].pack |= (1U << 31);
		data[i].offset = e->offset;
	}

	QSORT(data, ctx->entries_nr, midx_pack_order_cmp);

	ALLOC_ARRAY(pack_order, ctx->entries_nr);
	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *e = &ctx->entries[data[i].nr];
		struct pack_info *pack = &ctx->info[ctx->pack_perm[e->pack_int_id]];
		if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
			pack->bitmap_pos = i;
		pack->bitmap_nr++;
		pack_order[i] = data[i].nr;
	}
	for (i = 0; i < ctx->nr; i++) {
		struct pack_info *pack = &ctx->info[ctx->pack_perm[i]];
		if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
			pack->bitmap_pos = 0;
	}
	free(data);

	trace2_region_leave("midx", "midx_pack_order", the_repository);

	return pack_order;
}
static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
				     struct write_midx_context *ctx)
{
	struct strbuf buf = STRBUF_INIT;
	const char *tmp_file;

	trace2_region_enter("midx", "write_midx_reverse_index", the_repository);

	strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));

	tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
					midx_hash, WRITE_REV);

	if (finalize_object_file(tmp_file, buf.buf))
		die(_("cannot store reverse index file"));

	strbuf_release(&buf);

	trace2_region_leave("midx", "write_midx_reverse_index", the_repository);
}
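/*
 * Translate the MIDX entries into a packing_data structure so the
 * bitmap writer can work with them in pseudo pack order.
 */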
static void prepare_midx_packing_data(struct packing_data *pdata,
				      struct write_midx_context *ctx)
{
	uint32_t i;

	trace2_region_enter("midx", "prepare_midx_packing_data", the_repository);

	memset(pdata, 0, sizeof(struct packing_data));
	prepare_packing_data(the_repository, pdata);

	for (i = 0; i < ctx->entries_nr; i++) {
		struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]];
		struct object_entry *to = packlist_alloc(pdata, &from->oid);

		oe_set_in_pack(pdata, to,
			       ctx->info[ctx->pack_perm[from->pack_int_id]].p);
	}

	trace2_region_leave("midx", "prepare_midx_packing_data", the_repository);
}
static int add_ref_to_pending(const char *refname,
			      const struct object_id *oid,
			      int flag, void *cb_data)
{
	struct rev_info *revs = (struct rev_info *)cb_data;
	struct object_id peeled;
	struct object *object;

	if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
		warning("symbolic ref is dangling: %s", refname);
		return 0;
	}

	if (!peel_iterated_oid(the_repository, oid, &peeled))
		oid = &peeled;

	object = parse_object_or_die(oid, refname);
	if (object->type != OBJ_COMMIT)
		return 0;

	add_pending_object(revs, object, "");
	if (bitmap_is_preferred_refname(revs->repo, refname))
		object->flags |= NEEDS_BITMAP;

	return 0;
}
struct bitmap_commit_cb {
	struct commit **commits;
	size_t commits_nr, commits_alloc;

	struct write_midx_context *ctx;
};
static const struct object_id *bitmap_oid_access(size_t index,
						 const void *_entries)
{
	const struct pack_midx_entry *entries = _entries;
	return &entries[index].oid;
}
static void bitmap_show_commit(struct commit *commit, void *_data)
{
	struct bitmap_commit_cb *data = _data;
	int pos = oid_pos(&commit->object.oid, data->ctx->entries,
			  data->ctx->entries_nr,
			  bitmap_oid_access);
	if (pos < 0)
		return;

	ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc);
	data->commits[data->commits_nr++] = commit;
}
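/*
 * Read a refs snapshot: one object ID per line, optionally prefixed
 * with '+' to mark the object as needing a bitmap.
 */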
static int read_refs_snapshot(const char *refs_snapshot,
			      struct rev_info *revs)
{
	struct strbuf buf = STRBUF_INIT;
	struct object_id oid;
	FILE *f = xfopen(refs_snapshot, "r");

	while (strbuf_getline(&buf, f) != EOF) {
		struct object *object;
		int preferred = 0;
		char *hex = buf.buf;
		const char *end = NULL;

		if (buf.len && *buf.buf == '+') {
			preferred = 1;
			hex = &buf.buf[1];
		}

		if (parse_oid_hex(hex, &oid, &end) < 0)
			die(_("could not parse line: %s"), buf.buf);
		if (*end)
			die(_("malformed line: %s"), buf.buf);

		object = parse_object_or_die(&oid, NULL);
		if (preferred)
			object->flags |= NEEDS_BITMAP;

		add_pending_object(revs, object, "");
	}

	fclose(f);
	strbuf_release(&buf);
	return 0;
}
static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p,
						    const char *refs_snapshot,
						    struct write_midx_context *ctx)
{
	struct rev_info revs;
	struct bitmap_commit_cb cb = {0};

	trace2_region_enter("midx", "find_commits_for_midx_bitmap",
			    the_repository);

	cb.ctx = ctx;

	repo_init_revisions(the_repository, &revs, NULL);
	if (refs_snapshot) {
		read_refs_snapshot(refs_snapshot, &revs);
	} else {
		setup_revisions(0, NULL, &revs, NULL);
		refs_for_each_ref(get_main_ref_store(the_repository),
				  add_ref_to_pending, &revs);
	}

	/*
	 * Skipping promisor objects here is intentional, since it only excludes
	 * them from the list of reachable commits that we want to select from
	 * when computing the selection of MIDX'd commits to receive bitmaps.
	 *
	 * Reachability bitmaps do require that their objects be closed under
	 * reachability, but fetching any objects missing from promisors at this
	 * point is too late. But, if one of those objects can be reached from
	 * another object that is included in the bitmap, then we will
	 * complain later that we don't have reachability closure (and fail
	 * appropriately).
	 */
	fetch_if_missing = 0;
	revs.exclude_promisor_objects = 1;

	if (prepare_revision_walk(&revs))
		die(_("revision walk setup failed"));

	traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb);
	if (indexed_commits_nr_p)
		*indexed_commits_nr_p = cb.commits_nr;

	release_revisions(&revs);

	trace2_region_leave("midx", "find_commits_for_midx_bitmap",
			    the_repository);

	return cb.commits;
}
,
797 const unsigned char *midx_hash
,
798 struct packing_data
*pdata
,
799 struct commit
**commits
,
801 uint32_t *pack_order
,
805 uint16_t options
= 0;
806 struct bitmap_writer writer
;
807 struct pack_idx_entry
**index
;
808 char *bitmap_name
= xstrfmt("%s-%s.bitmap", midx_name
,
809 hash_to_hex(midx_hash
));
811 trace2_region_enter("midx", "write_midx_bitmap", the_repository
);
813 if (flags
& MIDX_WRITE_BITMAP_HASH_CACHE
)
814 options
|= BITMAP_OPT_HASH_CACHE
;
816 if (flags
& MIDX_WRITE_BITMAP_LOOKUP_TABLE
)
817 options
|= BITMAP_OPT_LOOKUP_TABLE
;
820 * Build the MIDX-order index based on pdata.objects (which is already
821 * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
824 ALLOC_ARRAY(index
, pdata
->nr_objects
);
825 for (i
= 0; i
< pdata
->nr_objects
; i
++)
826 index
[i
] = &pdata
->objects
[i
].idx
;
828 bitmap_writer_init(&writer
, the_repository
);
829 bitmap_writer_show_progress(&writer
, flags
& MIDX_PROGRESS
);
830 bitmap_writer_build_type_index(&writer
, pdata
, index
,
834 * bitmap_writer_finish expects objects in lex order, but pack_order
835 * gives us exactly that. use it directly instead of re-sorting the
838 * This changes the order of objects in 'index' between
839 * bitmap_writer_build_type_index and bitmap_writer_finish.
841 * The same re-ordering takes place in the single-pack bitmap code via
842 * write_idx_file(), which is called by finish_tmp_packfile(), which
843 * happens between bitmap_writer_build_type_index() and
844 * bitmap_writer_finish().
846 for (i
= 0; i
< pdata
->nr_objects
; i
++)
847 index
[pack_order
[i
]] = &pdata
->objects
[i
].idx
;
849 bitmap_writer_select_commits(&writer
, commits
, commits_nr
);
850 ret
= bitmap_writer_build(&writer
, pdata
);
854 bitmap_writer_set_checksum(&writer
, midx_hash
);
855 bitmap_writer_finish(&writer
, index
, pdata
->nr_objects
, bitmap_name
,
861 bitmap_writer_free(&writer
);
863 trace2_region_leave("midx", "write_midx_bitmap", the_repository
);
static struct multi_pack_index *lookup_multi_pack_index(struct repository *r,
							const char *object_dir)
{
	struct multi_pack_index *result = NULL;
	struct multi_pack_index *cur;
	char *obj_dir_real = real_pathdup(object_dir, 1);
	struct strbuf cur_path_real = STRBUF_INIT;

	/* Ensure the given object_dir is local, or a known alternate. */
	find_odb(r, obj_dir_real);

	for (cur = get_multi_pack_index(r); cur; cur = cur->next) {
		strbuf_realpath(&cur_path_real, cur->object_dir, 1);
		if (!strcmp(obj_dir_real, cur_path_real.buf)) {
			result = cur;
			goto cleanup;
		}
	}

cleanup:
	free(obj_dir_real);
	strbuf_release(&cur_path_real);
	return result;
}
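/*
 * Seed ctx->info with the packs already tracked by the existing MIDX,
 * loading pack indexes only when a reverse index or a preferred pack
 * requires them.
 */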
static int fill_packs_from_midx(struct write_midx_context *ctx,
				const char *preferred_pack_name, uint32_t flags)
{
	uint32_t i;

	for (i = 0; i < ctx->m->num_packs; i++) {
		ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);

		if (flags & MIDX_WRITE_REV_INDEX || preferred_pack_name) {
			/*
			 * If generating a reverse index, we need to have
			 * packed_git's loaded to compare their
			 * mtimes and object count.
			 *
			 * If a preferred pack is specified, we need to
			 * have packed_git's loaded to ensure the chosen
			 * preferred pack has a non-zero object count.
			 */
			if (prepare_midx_pack(the_repository, ctx->m, i))
				return error(_("could not load pack"));

			if (open_pack_index(ctx->m->packs[i]))
				die(_("could not open index for %s"),
				    ctx->m->packs[i]->pack_name);
		}

		fill_pack_info(&ctx->info[ctx->nr++], ctx->m->packs[i],
			       ctx->m->pack_names[i], i);
	}

	return 0;
}
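/*
 * Write a new multi-pack-index for object_dir. Callers may restrict
 * the set of packs to include, request that packs be dropped, name a
 * preferred pack, and/or supply a refs snapshot for bitmap selection.
 */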
static int write_midx_internal(const char *object_dir,
			       struct string_list *packs_to_include,
			       struct string_list *packs_to_drop,
			       const char *preferred_pack_name,
			       const char *refs_snapshot,
			       unsigned flags)
{
	struct strbuf midx_name = STRBUF_INIT;
	unsigned char midx_hash[GIT_MAX_RAWSZ];
	uint32_t i, start_pack;
	struct hashfile *f = NULL;
	struct lock_file lk;
	struct write_midx_context ctx = { 0 };
	int bitmapped_packs_concat_len = 0;
	int pack_name_concat_len = 0;
	int dropped_packs = 0;
	int result = 0;
	struct chunkfile *cf;

	trace2_region_enter("midx", "write_midx_internal", the_repository);

	get_midx_filename(&midx_name, object_dir);
	if (safe_create_leading_directories(midx_name.buf))
		die_errno(_("unable to create leading directories of %s"),
			  midx_name.buf);

	if (!packs_to_include) {
		/*
		 * Only reference an existing MIDX when not filtering which
		 * packs to include, since all packs and objects are copied
		 * blindly from an existing MIDX if one is present.
		 */
		ctx.m = lookup_multi_pack_index(the_repository, object_dir);
	}

	if (ctx.m && !midx_checksum_valid(ctx.m)) {
		warning(_("ignoring existing multi-pack-index; checksum mismatch"));
		ctx.m = NULL;
	}

	ctx.alloc = ctx.m ? ctx.m->num_packs : 16;
	ctx.nr = 0;
	ALLOC_ARRAY(ctx.info, ctx.alloc);

	if (ctx.m && fill_packs_from_midx(&ctx, preferred_pack_name,
					  flags) < 0) {
		result = 1;
		goto cleanup;
	}

	start_pack = ctx.nr;

	ctx.pack_paths_checked = 0;
	if (flags & MIDX_PROGRESS)
		ctx.progress = start_delayed_progress(_("Adding packfiles to multi-pack-index"), 0);

	ctx.to_include = packs_to_include;

	for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
	stop_progress(&ctx.progress);

	if ((ctx.m && ctx.nr == ctx.m->num_packs) &&
	    !(packs_to_include || packs_to_drop)) {
		struct bitmap_index *bitmap_git;
		int bitmap_exists;
		int want_bitmap = flags & MIDX_WRITE_BITMAP;

		bitmap_git = prepare_midx_bitmap_git(ctx.m);
		bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
		free_bitmap_index(bitmap_git);

		if (bitmap_exists || !want_bitmap) {
			/*
			 * The correct MIDX already exists, and so does a
			 * corresponding bitmap (or one wasn't requested).
			 */
			if (!want_bitmap)
				clear_midx_files_ext(object_dir, ".bitmap",
						     NULL);
			goto cleanup;
		}
	}

	if (preferred_pack_name) {
		ctx.preferred_pack_idx = -1;

		for (i = 0; i < ctx.nr; i++) {
			if (!cmp_idx_or_pack_name(preferred_pack_name,
						  ctx.info[i].pack_name)) {
				ctx.preferred_pack_idx = i;
				break;
			}
		}

		if (ctx.preferred_pack_idx == -1)
			warning(_("unknown preferred pack: '%s'"),
				preferred_pack_name);
	} else if (ctx.nr &&
		   (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
		struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
		ctx.preferred_pack_idx = 0;

		if (packs_to_drop && packs_to_drop->nr)
			BUG("cannot write a MIDX bitmap during expiration");

		/*
		 * set a preferred pack when writing a bitmap to ensure that
		 * the pack from which the first object is selected in pseudo
		 * pack-order has all of its objects selected from that pack
		 * (and not another pack containing a duplicate)
		 */
		for (i = 1; i < ctx.nr; i++) {
			struct packed_git *p = ctx.info[i].p;

			if (!oldest->num_objects || p->mtime < oldest->mtime) {
				oldest = p;
				ctx.preferred_pack_idx = i;
			}
		}

		if (!oldest->num_objects) {
			/*
			 * If all packs are empty, unset the preferred index.
			 * This is acceptable since there will be no duplicate
			 * objects to resolve, so the preferred value doesn't
			 * matter.
			 */
			ctx.preferred_pack_idx = -1;
		}
	} else {
		/*
		 * otherwise don't mark any pack as preferred to avoid
		 * interfering with expiration logic below
		 */
		ctx.preferred_pack_idx = -1;
	}

	if (ctx.preferred_pack_idx > -1) {
		struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
		if (!preferred->num_objects) {
			error(_("cannot select preferred pack %s with no objects"),
			      preferred->pack_name);
			result = 1;
			goto cleanup;
		}
	}

	compute_sorted_entries(&ctx, start_pack);

	ctx.large_offsets_needed = 0;
	for (i = 0; i < ctx.entries_nr; i++) {
		if (ctx.entries[i].offset > 0x7fffffff)
			ctx.num_large_offsets++;
		if (ctx.entries[i].offset > 0xffffffff)
			ctx.large_offsets_needed = 1;
	}

	QSORT(ctx.info, ctx.nr, pack_info_compare);

	if (packs_to_drop && packs_to_drop->nr) {
		int drop_index = 0;
		int missing_drops = 0;

		for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
			int cmp = strcmp(ctx.info[i].pack_name,
					 packs_to_drop->items[drop_index].string);

			if (!cmp) {
				drop_index++;
				ctx.info[i].expired = 1;
			} else if (cmp > 0) {
				error(_("did not see pack-file %s to drop"),
				      packs_to_drop->items[drop_index].string);
				drop_index++;
				missing_drops++;
				i--;
			} else {
				ctx.info[i].expired = 0;
			}
		}

		if (missing_drops) {
			result = 1;
			goto cleanup;
		}
	}

	/*
	 * pack_perm stores a permutation between pack-int-ids from the
	 * previous multi-pack-index to the new one we are writing:
	 *
	 * pack_perm[old_id] = new_id
	 */
	ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
	for (i = 0; i < ctx.nr; i++) {
		if (ctx.info[i].expired) {
			dropped_packs++;
			ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
		} else {
			ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
		}
	}

	for (i = 0; i < ctx.nr; i++) {
		if (ctx.info[i].expired)
			continue;
		pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
		bitmapped_packs_concat_len += 2 * sizeof(uint32_t);
	}

	/* Check that the preferred pack wasn't expired (if given). */
	if (preferred_pack_name) {
		struct pack_info *preferred = bsearch(preferred_pack_name,
						      ctx.info, ctx.nr,
						      sizeof(*ctx.info),
						      idx_or_pack_name_cmp);
		if (preferred) {
			uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
			if (perm == PACK_EXPIRED)
				warning(_("preferred pack '%s' is expired"),
					preferred_pack_name);
		}
	}

	if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
		pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
					(pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);

	hold_lock_file_for_update(&lk, midx_name.buf, LOCK_DIE_ON_ERROR);
	f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));

	if (ctx.nr - dropped_packs == 0) {
		error(_("no pack files to index."));
		result = 1;
		goto cleanup;
	}

	if (!ctx.entries_nr) {
		if (flags & MIDX_WRITE_BITMAP)
			warning(_("refusing to write multi-pack .bitmap without any objects"));
		flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
	}

	cf = init_chunkfile(f);

	add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
		  write_midx_pack_names);
	add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
		  write_midx_oid_fanout);
	add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
		  st_mult(ctx.entries_nr, the_hash_algo->rawsz),
		  write_midx_oid_lookup);
	add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
		  st_mult(ctx.entries_nr, MIDX_CHUNK_OFFSET_WIDTH),
		  write_midx_object_offsets);

	if (ctx.large_offsets_needed)
		add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
			  st_mult(ctx.num_large_offsets,
				  MIDX_CHUNK_LARGE_OFFSET_WIDTH),
			  write_midx_large_offsets);

	if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
		ctx.pack_order = midx_pack_order(&ctx);
		add_chunk(cf, MIDX_CHUNKID_REVINDEX,
			  st_mult(ctx.entries_nr, sizeof(uint32_t)),
			  write_midx_revindex);
		add_chunk(cf, MIDX_CHUNKID_BITMAPPEDPACKS,
			  bitmapped_packs_concat_len,
			  write_midx_bitmapped_packs);
	}

	write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
	write_chunkfile(cf, &ctx);

	finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
			  CSUM_FSYNC | CSUM_HASH_IN_STREAM);

	if (flags & MIDX_WRITE_REV_INDEX &&
	    git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
		write_midx_reverse_index(midx_name.buf, midx_hash, &ctx);

	if (flags & MIDX_WRITE_BITMAP) {
		struct packing_data pdata;
		struct commit **commits;
		uint32_t commits_nr;

		if (!ctx.entries_nr)
			BUG("cannot write a bitmap without any objects");

		prepare_midx_packing_data(&pdata, &ctx);

		commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx);

		/*
		 * The previous steps translated the information from
		 * 'entries' into information suitable for constructing
		 * bitmaps. We no longer need that array, so clear it to
		 * reduce memory pressure.
		 */
		FREE_AND_NULL(ctx.entries);
		ctx.entries_nr = 0;

		if (write_midx_bitmap(midx_name.buf, midx_hash, &pdata,
				      commits, commits_nr, ctx.pack_order,
				      flags) < 0) {
			error(_("could not write multi-pack bitmap"));
			result = 1;
			clear_packing_data(&pdata);
			free(commits);
			goto cleanup;
		}

		clear_packing_data(&pdata);
		free(commits);
	}
	/*
	 * NOTE: Do not use ctx.entries beyond this point, since it might
	 * have been freed in the previous if block.
	 */

	if (ctx.m)
		close_object_store(the_repository->objects);

	if (commit_lock_file(&lk) < 0)
		die_errno(_("could not write multi-pack-index"));

	clear_midx_files_ext(object_dir, ".bitmap", midx_hash);
	clear_midx_files_ext(object_dir, ".rev", midx_hash);

cleanup:
	for (i = 0; i < ctx.nr; i++) {
		if (ctx.info[i].p) {
			close_pack(ctx.info[i].p);
			free(ctx.info[i].p);
		}
		free(ctx.info[i].pack_name);
	}

	free(ctx.info);
	free(ctx.pack_perm);
	free(ctx.pack_order);
	strbuf_release(&midx_name);

	trace2_region_leave("midx", "write_midx_internal", the_repository);

	return result;
}
int write_midx_file(const char *object_dir,
		    const char *preferred_pack_name,
		    const char *refs_snapshot,
		    unsigned flags)
{
	return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
				   refs_snapshot, flags);
}
int write_midx_file_only(const char *object_dir,
			 struct string_list *packs_to_include,
			 const char *preferred_pack_name,
			 const char *refs_snapshot,
			 unsigned flags)
{
	return write_midx_internal(object_dir, packs_to_include, NULL,
				   preferred_pack_name, refs_snapshot, flags);
}
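/*
 * Delete packs that the MIDX shows have no referenced objects, then
 * rewrite the MIDX without them.
 */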
int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
{
	uint32_t i, *count, result = 0;
	struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
	struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
	struct progress *progress = NULL;

	if (!m)
		return 0;

	CALLOC_ARRAY(count, m->num_packs);

	if (flags & MIDX_PROGRESS)
		progress = start_delayed_progress(_("Counting referenced objects"),
						  m->num_objects);
	for (i = 0; i < m->num_objects; i++) {
		int pack_int_id = nth_midxed_pack_int_id(m, i);
		count[pack_int_id]++;
		display_progress(progress, i + 1);
	}
	stop_progress(&progress);

	if (flags & MIDX_PROGRESS)
		progress = start_delayed_progress(_("Finding and deleting unreferenced packfiles"),
						  m->num_packs);
	for (i = 0; i < m->num_packs; i++) {
		char *pack_name;
		display_progress(progress, i + 1);

		if (count[i])
			continue;

		if (prepare_midx_pack(r, m, i))
			continue;

		if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
			continue;

		pack_name = xstrdup(m->packs[i]->pack_name);
		close_pack(m->packs[i]);

		string_list_insert(&packs_to_drop, m->pack_names[i]);
		unlink_pack_path(pack_name, 0);
		free(pack_name);
	}
	stop_progress(&progress);

	free(count);

	if (packs_to_drop.nr)
		result = write_midx_internal(object_dir, NULL, &packs_to_drop, NULL, NULL, flags);

	string_list_clear(&packs_to_drop, 0);

	return result;
}
struct repack_info {
	timestamp_t mtime;
	uint32_t referenced_objects;
	uint32_t pack_int_id;
};

static int compare_by_mtime(const void *a_, const void *b_)
{
	const struct repack_info *a, *b;

	a = (const struct repack_info *)a_;
	b = (const struct repack_info *)b_;

	if (a->mtime < b->mtime)
		return -1;
	if (a->mtime > b->mtime)
		return 1;
	return 0;
}
*r
,
1378 struct multi_pack_index
*m
,
1379 int pack_kept_objects
,
1380 uint32_t pack_int_id
)
1382 struct packed_git
*p
;
1383 if (prepare_midx_pack(r
, m
, pack_int_id
))
1385 p
= m
->packs
[pack_int_id
];
1386 if (!pack_kept_objects
&& p
->pack_keep
)
1390 if (open_pack_index(p
) || !p
->num_objects
)
static void fill_included_packs_all(struct repository *r,
				    struct multi_pack_index *m,
				    unsigned char *include_pack)
{
	uint32_t i;
	int pack_kept_objects = 0;

	repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);

	for (i = 0; i < m->num_packs; i++) {
		if (!want_included_pack(r, m, pack_kept_objects, i))
			continue;

		include_pack[i] = 1;
	}
}
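/*
 * Select packs for repacking oldest-first until the expected size of
 * their referenced objects reaches batch_size.
 */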
static void fill_included_packs_batch(struct repository *r,
				      struct multi_pack_index *m,
				      unsigned char *include_pack,
				      size_t batch_size)
{
	uint32_t i;
	size_t total_size = 0;
	struct repack_info *pack_info;
	int pack_kept_objects = 0;

	CALLOC_ARRAY(pack_info, m->num_packs);

	repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);

	for (i = 0; i < m->num_packs; i++) {
		pack_info[i].pack_int_id = i;

		if (prepare_midx_pack(r, m, i))
			continue;

		pack_info[i].mtime = m->packs[i]->mtime;
	}

	for (i = 0; i < m->num_objects; i++) {
		uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
		pack_info[pack_int_id].referenced_objects++;
	}

	QSORT(pack_info, m->num_packs, compare_by_mtime);

	for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
		int pack_int_id = pack_info[i].pack_int_id;
		struct packed_git *p = m->packs[pack_int_id];
		size_t expected_size;

		if (!want_included_pack(r, m, pack_kept_objects, pack_int_id))
			continue;

		expected_size = st_mult(p->pack_size,
					pack_info[i].referenced_objects);
		expected_size /= p->num_objects;

		if (expected_size >= batch_size)
			continue;

		total_size += expected_size;
		include_pack[pack_int_id] = 1;
	}

	free(pack_info);
}
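/*
 * Repack the selected packs by feeding their objects to 'git
 * pack-objects', then rewrite the MIDX to pick up the new pack.
 */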
int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
{
	int result = 0;
	uint32_t i, packs_to_repack = 0;
	unsigned char *include_pack;
	struct child_process cmd = CHILD_PROCESS_INIT;
	FILE *cmd_in;
	struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);

	/*
	 * When updating the default for these configuration
	 * variables in builtin/repack.c, these must be adjusted
	 * to match.
	 */
	int delta_base_offset = 1;
	int use_delta_islands = 0;

	if (!m)
		return 0;

	CALLOC_ARRAY(include_pack, m->num_packs);

	if (batch_size)
		fill_included_packs_batch(r, m, include_pack, batch_size);
	else
		fill_included_packs_all(r, m, include_pack);

	for (i = 0; i < m->num_packs; i++) {
		if (include_pack[i])
			packs_to_repack++;
	}
	if (packs_to_repack <= 1)
		goto cleanup;

	repo_config_get_bool(r, "repack.usedeltabaseoffset", &delta_base_offset);
	repo_config_get_bool(r, "repack.usedeltaislands", &use_delta_islands);

	strvec_push(&cmd.args, "pack-objects");

	strvec_pushf(&cmd.args, "%s/pack/pack", object_dir);

	if (delta_base_offset)
		strvec_push(&cmd.args, "--delta-base-offset");
	if (use_delta_islands)
		strvec_push(&cmd.args, "--delta-islands");

	if (flags & MIDX_PROGRESS)
		strvec_push(&cmd.args, "--progress");
	else
		strvec_push(&cmd.args, "-q");

	cmd.git_cmd = 1;
	cmd.in = cmd.out = -1;

	if (start_command(&cmd)) {
		error(_("could not start pack-objects"));
		result = 1;
		goto cleanup;
	}

	cmd_in = xfdopen(cmd.in, "w");

	for (i = 0; i < m->num_objects; i++) {
		struct object_id oid;
		uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);

		if (!include_pack[pack_int_id])
			continue;

		nth_midxed_object_oid(&oid, m, i);
		fprintf(cmd_in, "%s\n", oid_to_hex(&oid));
	}
	fclose(cmd_in);

	if (finish_command(&cmd)) {
		error(_("could not finish pack-objects"));
		result = 1;
		goto cleanup;
	}

	result = write_midx_internal(object_dir, NULL, NULL, NULL, NULL, flags);

cleanup:
	free(include_pack);
	return result;
}