/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/
#include "test-lib.h"
#include "lib-reftable.h"
#include "reftable/merged.h"
#include "reftable/reader.h"
#include "reftable/reftable-error.h"
#include "reftable/stack.h"
#include <dirent.h>
17 static void clear_dir(const char *dirname
)
19 struct strbuf path
= STRBUF_INIT
;
20 strbuf_addstr(&path
, dirname
);
21 remove_dir_recursively(&path
, 0);
22 strbuf_release(&path
);
/*
 * Count directory entries, skipping everything with a leading dot.
 * Returns 0 when the directory cannot be opened.
 */
static int count_dir_entries(const char *dirname)
{
	DIR *dir = opendir(dirname);
	int len = 0;
	struct dirent *d;

	if (!dir)
		return 0;

	while ((d = readdir(dir))) {
		/*
		 * Besides skipping over "." and "..", we also need to
		 * skip over other files that have a leading ".". This
		 * is due to behaviour of NFS, which will rename files
		 * to ".nfs*" to emulate delete-on-last-close.
		 *
		 * In any case this should be fine as the reftable
		 * library will never write files with leading dots
		 * anyway.
		 */
		if (starts_with(d->d_name, "."))
			continue;
		len++;
	}
	closedir(dir);
	return len;
}
/*
 * Work linenumber into the tempdir, so we can see which tests forget to
 * clean up. Returns a pointer to a static buffer; not reentrant.
 */
static char *get_tmp_template(int linenumber)
{
	const char *tmp = getenv("TMPDIR");
	static char template[1024];
	snprintf(template, sizeof(template) - 1, "%s/stack_test-%d.XXXXXX",
		 tmp ? tmp : "/tmp", linenumber);
	return template;
}
/* Create a fresh temporary directory named after the calling line. */
static char *get_tmp_dir(int linenumber)
{
	char *dir = get_tmp_template(linenumber);
	check(mkdtemp(dir) != NULL);
	return dir;
}
72 static void t_read_file(void)
74 char *fn
= get_tmp_template(__LINE__
);
75 struct tempfile
*tmp
= mks_tempfile(fn
);
76 int fd
= get_tempfile_fd(tmp
);
77 char out
[1024] = "line1\n\nline2\nline3";
80 const char *want
[] = { "line1", "line2", "line3" };
83 n
= write_in_full(fd
, out
, strlen(out
));
84 check_int(n
, ==, strlen(out
));
86 check_int(err
, >=, 0);
88 err
= read_lines(fn
, &names
);
91 for (size_t i
= 0; names
[i
]; i
++)
92 check_str(want
[i
], names
[i
]);
95 delete_tempfile(&tmp
);
98 static int write_test_ref(struct reftable_writer
*wr
, void *arg
)
100 struct reftable_ref_record
*ref
= arg
;
101 reftable_writer_set_limits(wr
, ref
->update_index
, ref
->update_index
);
102 return reftable_writer_add_ref(wr
, ref
);
105 static void write_n_ref_tables(struct reftable_stack
*st
,
108 struct strbuf buf
= STRBUF_INIT
;
109 int disable_auto_compact
;
112 disable_auto_compact
= st
->opts
.disable_auto_compact
;
113 st
->opts
.disable_auto_compact
= 1;
115 for (size_t i
= 0; i
< n
; i
++) {
116 struct reftable_ref_record ref
= {
117 .update_index
= reftable_stack_next_update_index(st
),
118 .value_type
= REFTABLE_REF_VAL1
,
122 strbuf_addf(&buf
, "refs/heads/branch-%04"PRIuMAX
, (uintmax_t)i
);
123 ref
.refname
= buf
.buf
;
124 t_reftable_set_hash(ref
.value
.val1
, i
, GIT_SHA1_FORMAT_ID
);
126 err
= reftable_stack_add(st
, &write_test_ref
, &ref
);
130 st
->opts
.disable_auto_compact
= disable_auto_compact
;
131 strbuf_release(&buf
);
/* Argument bundle for write_test_log(): the record plus the index to write at. */
struct write_log_arg {
	struct reftable_log_record *log;
	uint64_t update_index;
};
139 static int write_test_log(struct reftable_writer
*wr
, void *arg
)
141 struct write_log_arg
*wla
= arg
;
143 reftable_writer_set_limits(wr
, wla
->update_index
, wla
->update_index
);
144 return reftable_writer_add_log(wr
, wla
->log
);
147 static void t_reftable_stack_add_one(void)
149 char *dir
= get_tmp_dir(__LINE__
);
150 struct strbuf scratch
= STRBUF_INIT
;
151 int mask
= umask(002);
152 struct reftable_write_options opts
= {
153 .default_permissions
= 0660,
155 struct reftable_stack
*st
= NULL
;
157 struct reftable_ref_record ref
= {
158 .refname
= (char *) "HEAD",
160 .value_type
= REFTABLE_REF_SYMREF
,
161 .value
.symref
= (char *) "master",
163 struct reftable_ref_record dest
= { 0 };
164 struct stat stat_result
= { 0 };
165 err
= reftable_new_stack(&st
, dir
, &opts
);
168 err
= reftable_stack_add(st
, write_test_ref
, &ref
);
171 err
= reftable_stack_read_ref(st
, ref
.refname
, &dest
);
173 check(reftable_ref_record_equal(&ref
, &dest
, GIT_SHA1_RAWSZ
));
174 check_int(st
->readers_len
, >, 0);
176 #ifndef GIT_WINDOWS_NATIVE
177 strbuf_addstr(&scratch
, dir
);
178 strbuf_addstr(&scratch
, "/tables.list");
179 err
= stat(scratch
.buf
, &stat_result
);
181 check_int((stat_result
.st_mode
& 0777), ==, opts
.default_permissions
);
183 strbuf_reset(&scratch
);
184 strbuf_addstr(&scratch
, dir
);
185 strbuf_addstr(&scratch
, "/");
186 /* do not try at home; not an external API for reftable. */
187 strbuf_addstr(&scratch
, st
->readers
[0]->name
);
188 err
= stat(scratch
.buf
, &stat_result
);
190 check_int((stat_result
.st_mode
& 0777), ==, opts
.default_permissions
);
195 reftable_ref_record_release(&dest
);
196 reftable_stack_destroy(st
);
197 strbuf_release(&scratch
);
202 static void t_reftable_stack_uptodate(void)
204 struct reftable_write_options opts
= { 0 };
205 struct reftable_stack
*st1
= NULL
;
206 struct reftable_stack
*st2
= NULL
;
207 char *dir
= get_tmp_dir(__LINE__
);
210 struct reftable_ref_record ref1
= {
211 .refname
= (char *) "HEAD",
213 .value_type
= REFTABLE_REF_SYMREF
,
214 .value
.symref
= (char *) "master",
216 struct reftable_ref_record ref2
= {
217 .refname
= (char *) "branch2",
219 .value_type
= REFTABLE_REF_SYMREF
,
220 .value
.symref
= (char *) "master",
224 /* simulate multi-process access to the same stack
225 by creating two stacks for the same directory.
227 err
= reftable_new_stack(&st1
, dir
, &opts
);
230 err
= reftable_new_stack(&st2
, dir
, &opts
);
233 err
= reftable_stack_add(st1
, write_test_ref
, &ref1
);
236 err
= reftable_stack_add(st2
, write_test_ref
, &ref2
);
237 check_int(err
, ==, REFTABLE_OUTDATED_ERROR
);
239 err
= reftable_stack_reload(st2
);
242 err
= reftable_stack_add(st2
, write_test_ref
, &ref2
);
244 reftable_stack_destroy(st1
);
245 reftable_stack_destroy(st2
);
249 static void t_reftable_stack_transaction_api(void)
251 char *dir
= get_tmp_dir(__LINE__
);
252 struct reftable_write_options opts
= { 0 };
253 struct reftable_stack
*st
= NULL
;
255 struct reftable_addition
*add
= NULL
;
257 struct reftable_ref_record ref
= {
258 .refname
= (char *) "HEAD",
260 .value_type
= REFTABLE_REF_SYMREF
,
261 .value
.symref
= (char *) "master",
263 struct reftable_ref_record dest
= { 0 };
265 err
= reftable_new_stack(&st
, dir
, &opts
);
268 reftable_addition_destroy(add
);
270 err
= reftable_stack_new_addition(&add
, st
, 0);
273 err
= reftable_addition_add(add
, write_test_ref
, &ref
);
276 err
= reftable_addition_commit(add
);
279 reftable_addition_destroy(add
);
281 err
= reftable_stack_read_ref(st
, ref
.refname
, &dest
);
283 check_int(REFTABLE_REF_SYMREF
, ==, dest
.value_type
);
284 check(reftable_ref_record_equal(&ref
, &dest
, GIT_SHA1_RAWSZ
));
286 reftable_ref_record_release(&dest
);
287 reftable_stack_destroy(st
);
291 static void t_reftable_stack_transaction_with_reload(void)
293 char *dir
= get_tmp_dir(__LINE__
);
294 struct reftable_stack
*st1
= NULL
, *st2
= NULL
;
296 struct reftable_addition
*add
= NULL
;
297 struct reftable_ref_record refs
[2] = {
299 .refname
= (char *) "refs/heads/a",
301 .value_type
= REFTABLE_REF_VAL1
,
302 .value
.val1
= { '1' },
305 .refname
= (char *) "refs/heads/b",
307 .value_type
= REFTABLE_REF_VAL1
,
308 .value
.val1
= { '1' },
311 struct reftable_ref_record ref
= { 0 };
313 err
= reftable_new_stack(&st1
, dir
, NULL
);
315 err
= reftable_new_stack(&st2
, dir
, NULL
);
318 err
= reftable_stack_new_addition(&add
, st1
, 0);
320 err
= reftable_addition_add(add
, write_test_ref
, &refs
[0]);
322 err
= reftable_addition_commit(add
);
324 reftable_addition_destroy(add
);
327 * The second stack is now outdated, which we should notice. We do not
328 * create the addition and lock the stack by default, but allow the
329 * reload to happen when REFTABLE_STACK_NEW_ADDITION_RELOAD is set.
331 err
= reftable_stack_new_addition(&add
, st2
, 0);
332 check_int(err
, ==, REFTABLE_OUTDATED_ERROR
);
333 err
= reftable_stack_new_addition(&add
, st2
, REFTABLE_STACK_NEW_ADDITION_RELOAD
);
335 err
= reftable_addition_add(add
, write_test_ref
, &refs
[1]);
337 err
= reftable_addition_commit(add
);
339 reftable_addition_destroy(add
);
341 for (size_t i
= 0; i
< ARRAY_SIZE(refs
); i
++) {
342 err
= reftable_stack_read_ref(st2
, refs
[i
].refname
, &ref
);
344 check(reftable_ref_record_equal(&refs
[i
], &ref
, GIT_SHA1_RAWSZ
));
347 reftable_ref_record_release(&ref
);
348 reftable_stack_destroy(st1
);
349 reftable_stack_destroy(st2
);
353 static void t_reftable_stack_transaction_api_performs_auto_compaction(void)
355 char *dir
= get_tmp_dir(__LINE__
);
356 struct reftable_write_options opts
= {0};
357 struct reftable_addition
*add
= NULL
;
358 struct reftable_stack
*st
= NULL
;
362 err
= reftable_new_stack(&st
, dir
, &opts
);
365 for (size_t i
= 0; i
<= n
; i
++) {
366 struct reftable_ref_record ref
= {
367 .update_index
= reftable_stack_next_update_index(st
),
368 .value_type
= REFTABLE_REF_SYMREF
,
369 .value
.symref
= (char *) "master",
373 snprintf(name
, sizeof(name
), "branch%04"PRIuMAX
, (uintmax_t)i
);
377 * Disable auto-compaction for all but the last runs. Like this
378 * we can ensure that we indeed honor this setting and have
379 * better control over when exactly auto compaction runs.
381 st
->opts
.disable_auto_compact
= i
!= n
;
383 err
= reftable_stack_new_addition(&add
, st
, 0);
386 err
= reftable_addition_add(add
, write_test_ref
, &ref
);
389 err
= reftable_addition_commit(add
);
392 reftable_addition_destroy(add
);
395 * The stack length should grow continuously for all runs where
396 * auto compaction is disabled. When enabled, we should merge
397 * all tables in the stack.
400 check_int(st
->merged
->readers_len
, ==, i
+ 1);
402 check_int(st
->merged
->readers_len
, ==, 1);
405 reftable_stack_destroy(st
);
409 static void t_reftable_stack_auto_compaction_fails_gracefully(void)
411 struct reftable_ref_record ref
= {
412 .refname
= (char *) "refs/heads/master",
414 .value_type
= REFTABLE_REF_VAL1
,
415 .value
.val1
= {0x01},
417 struct reftable_write_options opts
= { 0 };
418 struct reftable_stack
*st
;
419 struct strbuf table_path
= STRBUF_INIT
;
420 char *dir
= get_tmp_dir(__LINE__
);
423 err
= reftable_new_stack(&st
, dir
, &opts
);
426 err
= reftable_stack_add(st
, write_test_ref
, &ref
);
428 check_int(st
->merged
->readers_len
, ==, 1);
429 check_int(st
->stats
.attempts
, ==, 0);
430 check_int(st
->stats
.failures
, ==, 0);
433 * Lock the newly written table such that it cannot be compacted.
434 * Adding a new table to the stack should not be impacted by this, even
435 * though auto-compaction will now fail.
437 strbuf_addf(&table_path
, "%s/%s.lock", dir
, st
->readers
[0]->name
);
438 write_file_buf(table_path
.buf
, "", 0);
440 ref
.update_index
= 2;
441 err
= reftable_stack_add(st
, write_test_ref
, &ref
);
443 check_int(st
->merged
->readers_len
, ==, 2);
444 check_int(st
->stats
.attempts
, ==, 1);
445 check_int(st
->stats
.failures
, ==, 1);
447 reftable_stack_destroy(st
);
448 strbuf_release(&table_path
);
452 static int write_error(struct reftable_writer
*wr UNUSED
, void *arg
)
454 return *((int *)arg
);
457 static void t_reftable_stack_update_index_check(void)
459 char *dir
= get_tmp_dir(__LINE__
);
460 struct reftable_write_options opts
= { 0 };
461 struct reftable_stack
*st
= NULL
;
463 struct reftable_ref_record ref1
= {
464 .refname
= (char *) "name1",
466 .value_type
= REFTABLE_REF_SYMREF
,
467 .value
.symref
= (char *) "master",
469 struct reftable_ref_record ref2
= {
470 .refname
= (char *) "name2",
472 .value_type
= REFTABLE_REF_SYMREF
,
473 .value
.symref
= (char *) "master",
476 err
= reftable_new_stack(&st
, dir
, &opts
);
479 err
= reftable_stack_add(st
, write_test_ref
, &ref1
);
482 err
= reftable_stack_add(st
, write_test_ref
, &ref2
);
483 check_int(err
, ==, REFTABLE_API_ERROR
);
484 reftable_stack_destroy(st
);
488 static void t_reftable_stack_lock_failure(void)
490 char *dir
= get_tmp_dir(__LINE__
);
491 struct reftable_write_options opts
= { 0 };
492 struct reftable_stack
*st
= NULL
;
495 err
= reftable_new_stack(&st
, dir
, &opts
);
497 for (i
= -1; i
!= REFTABLE_EMPTY_TABLE_ERROR
; i
--) {
498 err
= reftable_stack_add(st
, write_error
, &i
);
499 check_int(err
, ==, i
);
502 reftable_stack_destroy(st
);
506 static void t_reftable_stack_add(void)
509 struct reftable_write_options opts
= {
510 .exact_log_message
= 1,
511 .default_permissions
= 0660,
512 .disable_auto_compact
= 1,
514 struct reftable_stack
*st
= NULL
;
515 char *dir
= get_tmp_dir(__LINE__
);
516 struct reftable_ref_record refs
[2] = { 0 };
517 struct reftable_log_record logs
[2] = { 0 };
518 struct strbuf path
= STRBUF_INIT
;
519 struct stat stat_result
;
520 size_t i
, N
= ARRAY_SIZE(refs
);
522 err
= reftable_new_stack(&st
, dir
, &opts
);
525 for (i
= 0; i
< N
; i
++) {
527 snprintf(buf
, sizeof(buf
), "branch%02"PRIuMAX
, (uintmax_t)i
);
528 refs
[i
].refname
= xstrdup(buf
);
529 refs
[i
].update_index
= i
+ 1;
530 refs
[i
].value_type
= REFTABLE_REF_VAL1
;
531 t_reftable_set_hash(refs
[i
].value
.val1
, i
, GIT_SHA1_FORMAT_ID
);
533 logs
[i
].refname
= xstrdup(buf
);
534 logs
[i
].update_index
= N
+ i
+ 1;
535 logs
[i
].value_type
= REFTABLE_LOG_UPDATE
;
536 logs
[i
].value
.update
.email
= xstrdup("identity@invalid");
537 t_reftable_set_hash(logs
[i
].value
.update
.new_hash
, i
, GIT_SHA1_FORMAT_ID
);
540 for (i
= 0; i
< N
; i
++) {
541 int err
= reftable_stack_add(st
, write_test_ref
, &refs
[i
]);
545 for (i
= 0; i
< N
; i
++) {
546 struct write_log_arg arg
= {
548 .update_index
= reftable_stack_next_update_index(st
),
550 int err
= reftable_stack_add(st
, write_test_log
, &arg
);
554 err
= reftable_stack_compact_all(st
, NULL
);
557 for (i
= 0; i
< N
; i
++) {
558 struct reftable_ref_record dest
= { 0 };
560 int err
= reftable_stack_read_ref(st
, refs
[i
].refname
, &dest
);
562 check(reftable_ref_record_equal(&dest
, refs
+ i
,
564 reftable_ref_record_release(&dest
);
567 for (i
= 0; i
< N
; i
++) {
568 struct reftable_log_record dest
= { 0 };
569 int err
= reftable_stack_read_log(st
, refs
[i
].refname
, &dest
);
571 check(reftable_log_record_equal(&dest
, logs
+ i
,
573 reftable_log_record_release(&dest
);
576 #ifndef GIT_WINDOWS_NATIVE
577 strbuf_addstr(&path
, dir
);
578 strbuf_addstr(&path
, "/tables.list");
579 err
= stat(path
.buf
, &stat_result
);
581 check_int((stat_result
.st_mode
& 0777), ==, opts
.default_permissions
);
584 strbuf_addstr(&path
, dir
);
585 strbuf_addstr(&path
, "/");
586 /* do not try at home; not an external API for reftable. */
587 strbuf_addstr(&path
, st
->readers
[0]->name
);
588 err
= stat(path
.buf
, &stat_result
);
590 check_int((stat_result
.st_mode
& 0777), ==, opts
.default_permissions
);
596 reftable_stack_destroy(st
);
597 for (i
= 0; i
< N
; i
++) {
598 reftable_ref_record_release(&refs
[i
]);
599 reftable_log_record_release(&logs
[i
]);
601 strbuf_release(&path
);
605 static void t_reftable_stack_iterator(void)
607 struct reftable_write_options opts
= { 0 };
608 struct reftable_stack
*st
= NULL
;
609 char *dir
= get_tmp_dir(__LINE__
);
610 struct reftable_ref_record refs
[10] = { 0 };
611 struct reftable_log_record logs
[10] = { 0 };
612 struct reftable_iterator it
= { 0 };
613 size_t N
= ARRAY_SIZE(refs
), i
;
616 err
= reftable_new_stack(&st
, dir
, &opts
);
619 for (i
= 0; i
< N
; i
++) {
620 refs
[i
].refname
= xstrfmt("branch%02"PRIuMAX
, (uintmax_t)i
);
621 refs
[i
].update_index
= i
+ 1;
622 refs
[i
].value_type
= REFTABLE_REF_VAL1
;
623 t_reftable_set_hash(refs
[i
].value
.val1
, i
, GIT_SHA1_FORMAT_ID
);
625 logs
[i
].refname
= xstrfmt("branch%02"PRIuMAX
, (uintmax_t)i
);
626 logs
[i
].update_index
= i
+ 1;
627 logs
[i
].value_type
= REFTABLE_LOG_UPDATE
;
628 logs
[i
].value
.update
.email
= xstrdup("johndoe@invalid");
629 logs
[i
].value
.update
.message
= xstrdup("commit\n");
630 t_reftable_set_hash(logs
[i
].value
.update
.new_hash
, i
, GIT_SHA1_FORMAT_ID
);
633 for (i
= 0; i
< N
; i
++) {
634 err
= reftable_stack_add(st
, write_test_ref
, &refs
[i
]);
638 for (i
= 0; i
< N
; i
++) {
639 struct write_log_arg arg
= {
641 .update_index
= reftable_stack_next_update_index(st
),
644 err
= reftable_stack_add(st
, write_test_log
, &arg
);
648 reftable_stack_init_ref_iterator(st
, &it
);
649 reftable_iterator_seek_ref(&it
, refs
[0].refname
);
651 struct reftable_ref_record ref
= { 0 };
653 err
= reftable_iterator_next_ref(&it
, &ref
);
657 check(reftable_ref_record_equal(&ref
, &refs
[i
], GIT_SHA1_RAWSZ
));
658 reftable_ref_record_release(&ref
);
662 reftable_iterator_destroy(&it
);
664 reftable_stack_init_log_iterator(st
, &it
);
665 reftable_iterator_seek_log(&it
, logs
[0].refname
);
667 struct reftable_log_record log
= { 0 };
669 err
= reftable_iterator_next_log(&it
, &log
);
673 check(reftable_log_record_equal(&log
, &logs
[i
], GIT_SHA1_RAWSZ
));
674 reftable_log_record_release(&log
);
678 reftable_stack_destroy(st
);
679 reftable_iterator_destroy(&it
);
680 for (i
= 0; i
< N
; i
++) {
681 reftable_ref_record_release(&refs
[i
]);
682 reftable_log_record_release(&logs
[i
]);
687 static void t_reftable_stack_log_normalize(void)
690 struct reftable_write_options opts
= {
693 struct reftable_stack
*st
= NULL
;
694 char *dir
= get_tmp_dir(__LINE__
);
695 struct reftable_log_record input
= {
696 .refname
= (char *) "branch",
698 .value_type
= REFTABLE_LOG_UPDATE
,
706 struct reftable_log_record dest
= {
709 struct write_log_arg arg
= {
714 err
= reftable_new_stack(&st
, dir
, &opts
);
717 input
.value
.update
.message
= (char *) "one\ntwo";
718 err
= reftable_stack_add(st
, write_test_log
, &arg
);
719 check_int(err
, ==, REFTABLE_API_ERROR
);
721 input
.value
.update
.message
= (char *) "one";
722 err
= reftable_stack_add(st
, write_test_log
, &arg
);
725 err
= reftable_stack_read_log(st
, input
.refname
, &dest
);
727 check_str(dest
.value
.update
.message
, "one\n");
729 input
.value
.update
.message
= (char *) "two\n";
730 arg
.update_index
= 2;
731 err
= reftable_stack_add(st
, write_test_log
, &arg
);
733 err
= reftable_stack_read_log(st
, input
.refname
, &dest
);
735 check_str(dest
.value
.update
.message
, "two\n");
738 reftable_stack_destroy(st
);
739 reftable_log_record_release(&dest
);
743 static void t_reftable_stack_tombstone(void)
745 char *dir
= get_tmp_dir(__LINE__
);
746 struct reftable_write_options opts
= { 0 };
747 struct reftable_stack
*st
= NULL
;
749 struct reftable_ref_record refs
[2] = { 0 };
750 struct reftable_log_record logs
[2] = { 0 };
751 size_t i
, N
= ARRAY_SIZE(refs
);
752 struct reftable_ref_record dest
= { 0 };
753 struct reftable_log_record log_dest
= { 0 };
755 err
= reftable_new_stack(&st
, dir
, &opts
);
758 /* even entries add the refs, odd entries delete them. */
759 for (i
= 0; i
< N
; i
++) {
760 const char *buf
= "branch";
761 refs
[i
].refname
= xstrdup(buf
);
762 refs
[i
].update_index
= i
+ 1;
764 refs
[i
].value_type
= REFTABLE_REF_VAL1
;
765 t_reftable_set_hash(refs
[i
].value
.val1
, i
,
769 logs
[i
].refname
= xstrdup(buf
);
770 /* update_index is part of the key. */
771 logs
[i
].update_index
= 42;
773 logs
[i
].value_type
= REFTABLE_LOG_UPDATE
;
774 t_reftable_set_hash(logs
[i
].value
.update
.new_hash
, i
,
776 logs
[i
].value
.update
.email
=
777 xstrdup("identity@invalid");
780 for (i
= 0; i
< N
; i
++) {
781 int err
= reftable_stack_add(st
, write_test_ref
, &refs
[i
]);
785 for (i
= 0; i
< N
; i
++) {
786 struct write_log_arg arg
= {
788 .update_index
= reftable_stack_next_update_index(st
),
790 int err
= reftable_stack_add(st
, write_test_log
, &arg
);
794 err
= reftable_stack_read_ref(st
, "branch", &dest
);
795 check_int(err
, ==, 1);
796 reftable_ref_record_release(&dest
);
798 err
= reftable_stack_read_log(st
, "branch", &log_dest
);
799 check_int(err
, ==, 1);
800 reftable_log_record_release(&log_dest
);
802 err
= reftable_stack_compact_all(st
, NULL
);
805 err
= reftable_stack_read_ref(st
, "branch", &dest
);
806 check_int(err
, ==, 1);
808 err
= reftable_stack_read_log(st
, "branch", &log_dest
);
809 check_int(err
, ==, 1);
810 reftable_ref_record_release(&dest
);
811 reftable_log_record_release(&log_dest
);
814 reftable_stack_destroy(st
);
815 for (i
= 0; i
< N
; i
++) {
816 reftable_ref_record_release(&refs
[i
]);
817 reftable_log_record_release(&logs
[i
]);
822 static void t_reftable_stack_hash_id(void)
824 char *dir
= get_tmp_dir(__LINE__
);
825 struct reftable_write_options opts
= { 0 };
826 struct reftable_stack
*st
= NULL
;
829 struct reftable_ref_record ref
= {
830 .refname
= (char *) "master",
831 .value_type
= REFTABLE_REF_SYMREF
,
832 .value
.symref
= (char *) "target",
835 struct reftable_write_options opts32
= { .hash_id
= GIT_SHA256_FORMAT_ID
};
836 struct reftable_stack
*st32
= NULL
;
837 struct reftable_write_options opts_default
= { 0 };
838 struct reftable_stack
*st_default
= NULL
;
839 struct reftable_ref_record dest
= { 0 };
841 err
= reftable_new_stack(&st
, dir
, &opts
);
844 err
= reftable_stack_add(st
, write_test_ref
, &ref
);
847 /* can't read it with the wrong hash ID. */
848 err
= reftable_new_stack(&st32
, dir
, &opts32
);
849 check_int(err
, ==, REFTABLE_FORMAT_ERROR
);
851 /* check that we can read it back with default opts too. */
852 err
= reftable_new_stack(&st_default
, dir
, &opts_default
);
855 err
= reftable_stack_read_ref(st_default
, "master", &dest
);
858 check(reftable_ref_record_equal(&ref
, &dest
, GIT_SHA1_RAWSZ
));
859 reftable_ref_record_release(&dest
);
860 reftable_stack_destroy(st
);
861 reftable_stack_destroy(st_default
);
865 static void t_suggest_compaction_segment(void)
867 uint64_t sizes
[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
869 suggest_compaction_segment(sizes
, ARRAY_SIZE(sizes
), 2);
870 check_int(min
.start
, ==, 1);
871 check_int(min
.end
, ==, 10);
874 static void t_suggest_compaction_segment_nothing(void)
876 uint64_t sizes
[] = { 64, 32, 16, 8, 4, 2 };
877 struct segment result
=
878 suggest_compaction_segment(sizes
, ARRAY_SIZE(sizes
), 2);
879 check_int(result
.start
, ==, result
.end
);
882 static void t_reflog_expire(void)
884 char *dir
= get_tmp_dir(__LINE__
);
885 struct reftable_write_options opts
= { 0 };
886 struct reftable_stack
*st
= NULL
;
887 struct reftable_log_record logs
[20] = { 0 };
888 size_t i
, N
= ARRAY_SIZE(logs
) - 1;
890 struct reftable_log_expiry_config expiry
= {
893 struct reftable_log_record log
= { 0 };
895 err
= reftable_new_stack(&st
, dir
, &opts
);
898 for (i
= 1; i
<= N
; i
++) {
900 snprintf(buf
, sizeof(buf
), "branch%02"PRIuMAX
, (uintmax_t)i
);
902 logs
[i
].refname
= xstrdup(buf
);
903 logs
[i
].update_index
= i
;
904 logs
[i
].value_type
= REFTABLE_LOG_UPDATE
;
905 logs
[i
].value
.update
.time
= i
;
906 logs
[i
].value
.update
.email
= xstrdup("identity@invalid");
907 t_reftable_set_hash(logs
[i
].value
.update
.new_hash
, i
,
911 for (i
= 1; i
<= N
; i
++) {
912 struct write_log_arg arg
= {
914 .update_index
= reftable_stack_next_update_index(st
),
916 int err
= reftable_stack_add(st
, write_test_log
, &arg
);
920 err
= reftable_stack_compact_all(st
, NULL
);
923 err
= reftable_stack_compact_all(st
, &expiry
);
926 err
= reftable_stack_read_log(st
, logs
[9].refname
, &log
);
927 check_int(err
, ==, 1);
929 err
= reftable_stack_read_log(st
, logs
[11].refname
, &log
);
932 expiry
.min_update_index
= 15;
933 err
= reftable_stack_compact_all(st
, &expiry
);
936 err
= reftable_stack_read_log(st
, logs
[14].refname
, &log
);
937 check_int(err
, ==, 1);
939 err
= reftable_stack_read_log(st
, logs
[16].refname
, &log
);
943 reftable_stack_destroy(st
);
944 for (i
= 0; i
<= N
; i
++)
945 reftable_log_record_release(&logs
[i
]);
947 reftable_log_record_release(&log
);
950 static int write_nothing(struct reftable_writer
*wr
, void *arg UNUSED
)
952 reftable_writer_set_limits(wr
, 1, 1);
956 static void t_empty_add(void)
958 struct reftable_write_options opts
= { 0 };
959 struct reftable_stack
*st
= NULL
;
961 char *dir
= get_tmp_dir(__LINE__
);
962 struct reftable_stack
*st2
= NULL
;
964 err
= reftable_new_stack(&st
, dir
, &opts
);
967 err
= reftable_stack_add(st
, write_nothing
, NULL
);
970 err
= reftable_new_stack(&st2
, dir
, &opts
);
973 reftable_stack_destroy(st
);
974 reftable_stack_destroy(st2
);
/* Integer log base N of sz, rounded down; 0 for sz == 0. */
static int fastlogN(uint64_t sz, uint64_t N)
{
	int l = 0;
	if (sz == 0)
		return 0;
	for (; sz; sz /= N)
		l++;
	return l - 1;
}
987 static void t_reftable_stack_auto_compaction(void)
989 struct reftable_write_options opts
= {
990 .disable_auto_compact
= 1,
992 struct reftable_stack
*st
= NULL
;
993 char *dir
= get_tmp_dir(__LINE__
);
997 err
= reftable_new_stack(&st
, dir
, &opts
);
1000 for (i
= 0; i
< N
; i
++) {
1002 struct reftable_ref_record ref
= {
1004 .update_index
= reftable_stack_next_update_index(st
),
1005 .value_type
= REFTABLE_REF_SYMREF
,
1006 .value
.symref
= (char *) "master",
1008 snprintf(name
, sizeof(name
), "branch%04"PRIuMAX
, (uintmax_t)i
);
1010 err
= reftable_stack_add(st
, write_test_ref
, &ref
);
1013 err
= reftable_stack_auto_compact(st
);
1015 check(i
< 2 || st
->merged
->readers_len
< 2 * fastlogN(i
, 2));
1018 check_int(reftable_stack_compaction_stats(st
)->entries_written
, <,
1019 (uint64_t)(N
* fastlogN(N
, 2)));
1021 reftable_stack_destroy(st
);
1025 static void t_reftable_stack_auto_compaction_factor(void)
1027 struct reftable_write_options opts
= {
1028 .auto_compaction_factor
= 5,
1030 struct reftable_stack
*st
= NULL
;
1031 char *dir
= get_tmp_dir(__LINE__
);
1035 err
= reftable_new_stack(&st
, dir
, &opts
);
1038 for (size_t i
= 0; i
< N
; i
++) {
1040 struct reftable_ref_record ref
= {
1042 .update_index
= reftable_stack_next_update_index(st
),
1043 .value_type
= REFTABLE_REF_VAL1
,
1045 xsnprintf(name
, sizeof(name
), "branch%04"PRIuMAX
, (uintmax_t)i
);
1047 err
= reftable_stack_add(st
, &write_test_ref
, &ref
);
1050 check(i
< 5 || st
->merged
->readers_len
< 5 * fastlogN(i
, 5));
1053 reftable_stack_destroy(st
);
1057 static void t_reftable_stack_auto_compaction_with_locked_tables(void)
1059 struct reftable_write_options opts
= {
1060 .disable_auto_compact
= 1,
1062 struct reftable_stack
*st
= NULL
;
1063 struct strbuf buf
= STRBUF_INIT
;
1064 char *dir
= get_tmp_dir(__LINE__
);
1067 err
= reftable_new_stack(&st
, dir
, &opts
);
1070 write_n_ref_tables(st
, 5);
1071 check_int(st
->merged
->readers_len
, ==, 5);
1074 * Given that all tables we have written should be roughly the same
1075 * size, we expect that auto-compaction will want to compact all of the
1076 * tables. Locking any of the tables will keep it from doing so.
1079 strbuf_addf(&buf
, "%s/%s.lock", dir
, st
->readers
[2]->name
);
1080 write_file_buf(buf
.buf
, "", 0);
1083 * When parts of the stack are locked, then auto-compaction does a best
1084 * effort compaction of those tables which aren't locked. So while this
1085 * would in theory compact all tables, due to the preexisting lock we
1086 * only compact the newest two tables.
1088 err
= reftable_stack_auto_compact(st
);
1090 check_int(st
->stats
.failures
, ==, 0);
1091 check_int(st
->merged
->readers_len
, ==, 4);
1093 reftable_stack_destroy(st
);
1094 strbuf_release(&buf
);
1098 static void t_reftable_stack_add_performs_auto_compaction(void)
1100 struct reftable_write_options opts
= { 0 };
1101 struct reftable_stack
*st
= NULL
;
1102 struct strbuf refname
= STRBUF_INIT
;
1103 char *dir
= get_tmp_dir(__LINE__
);
1107 err
= reftable_new_stack(&st
, dir
, &opts
);
1110 for (i
= 0; i
<= n
; i
++) {
1111 struct reftable_ref_record ref
= {
1112 .update_index
= reftable_stack_next_update_index(st
),
1113 .value_type
= REFTABLE_REF_SYMREF
,
1114 .value
.symref
= (char *) "master",
1118 * Disable auto-compaction for all but the last runs. Like this
1119 * we can ensure that we indeed honor this setting and have
1120 * better control over when exactly auto compaction runs.
1122 st
->opts
.disable_auto_compact
= i
!= n
;
1124 strbuf_reset(&refname
);
1125 strbuf_addf(&refname
, "branch-%04"PRIuMAX
, (uintmax_t)i
);
1126 ref
.refname
= refname
.buf
;
1128 err
= reftable_stack_add(st
, write_test_ref
, &ref
);
1132 * The stack length should grow continuously for all runs where
1133 * auto compaction is disabled. When enabled, we should merge
1134 * all tables in the stack.
1137 check_int(st
->merged
->readers_len
, ==, i
+ 1);
1139 check_int(st
->merged
->readers_len
, ==, 1);
1142 reftable_stack_destroy(st
);
1143 strbuf_release(&refname
);
1147 static void t_reftable_stack_compaction_with_locked_tables(void)
1149 struct reftable_write_options opts
= {
1150 .disable_auto_compact
= 1,
1152 struct reftable_stack
*st
= NULL
;
1153 struct strbuf buf
= STRBUF_INIT
;
1154 char *dir
= get_tmp_dir(__LINE__
);
1157 err
= reftable_new_stack(&st
, dir
, &opts
);
1160 write_n_ref_tables(st
, 3);
1161 check_int(st
->merged
->readers_len
, ==, 3);
1163 /* Lock one of the tables that we're about to compact. */
1165 strbuf_addf(&buf
, "%s/%s.lock", dir
, st
->readers
[1]->name
);
1166 write_file_buf(buf
.buf
, "", 0);
1169 * Compaction is expected to fail given that we were not able to
1170 * compact all tables.
1172 err
= reftable_stack_compact_all(st
, NULL
);
1173 check_int(err
, ==, REFTABLE_LOCK_ERROR
);
1174 check_int(st
->stats
.failures
, ==, 1);
1175 check_int(st
->merged
->readers_len
, ==, 3);
1177 reftable_stack_destroy(st
);
1178 strbuf_release(&buf
);
1182 static void t_reftable_stack_compaction_concurrent(void)
1184 struct reftable_write_options opts
= { 0 };
1185 struct reftable_stack
*st1
= NULL
, *st2
= NULL
;
1186 char *dir
= get_tmp_dir(__LINE__
);
1189 err
= reftable_new_stack(&st1
, dir
, &opts
);
1191 write_n_ref_tables(st1
, 3);
1193 err
= reftable_new_stack(&st2
, dir
, &opts
);
1196 err
= reftable_stack_compact_all(st1
, NULL
);
1199 reftable_stack_destroy(st1
);
1200 reftable_stack_destroy(st2
);
1202 check_int(count_dir_entries(dir
), ==, 2);
1206 static void unclean_stack_close(struct reftable_stack
*st
)
1208 /* break abstraction boundary to simulate unclean shutdown. */
1209 for (size_t i
= 0; i
< st
->readers_len
; i
++)
1210 reftable_reader_decref(st
->readers
[i
]);
1211 st
->readers_len
= 0;
1212 FREE_AND_NULL(st
->readers
);
1215 static void t_reftable_stack_compaction_concurrent_clean(void)
1217 struct reftable_write_options opts
= { 0 };
1218 struct reftable_stack
*st1
= NULL
, *st2
= NULL
, *st3
= NULL
;
1219 char *dir
= get_tmp_dir(__LINE__
);
1222 err
= reftable_new_stack(&st1
, dir
, &opts
);
1224 write_n_ref_tables(st1
, 3);
1226 err
= reftable_new_stack(&st2
, dir
, &opts
);
1229 err
= reftable_stack_compact_all(st1
, NULL
);
1232 unclean_stack_close(st1
);
1233 unclean_stack_close(st2
);
1235 err
= reftable_new_stack(&st3
, dir
, &opts
);
1238 err
= reftable_stack_clean(st3
);
1240 check_int(count_dir_entries(dir
), ==, 2);
1242 reftable_stack_destroy(st1
);
1243 reftable_stack_destroy(st2
);
1244 reftable_stack_destroy(st3
);
1249 static void t_reftable_stack_read_across_reload(void)
1251 struct reftable_write_options opts
= { 0 };
1252 struct reftable_stack
*st1
= NULL
, *st2
= NULL
;
1253 struct reftable_ref_record rec
= { 0 };
1254 struct reftable_iterator it
= { 0 };
1255 char *dir
= get_tmp_dir(__LINE__
);
1258 /* Create a first stack and set up an iterator for it. */
1259 err
= reftable_new_stack(&st1
, dir
, &opts
);
1261 write_n_ref_tables(st1
, 2);
1262 check_int(st1
->merged
->readers_len
, ==, 2);
1263 reftable_stack_init_ref_iterator(st1
, &it
);
1264 err
= reftable_iterator_seek_ref(&it
, "");
1267 /* Set up a second stack for the same directory and compact it. */
1268 err
= reftable_new_stack(&st2
, dir
, &opts
);
1270 check_int(st2
->merged
->readers_len
, ==, 2);
1271 err
= reftable_stack_compact_all(st2
, NULL
);
1273 check_int(st2
->merged
->readers_len
, ==, 1);
1276 * Verify that we can continue to use the old iterator even after we
1277 * have reloaded its stack.
1279 err
= reftable_stack_reload(st1
);
1281 check_int(st1
->merged
->readers_len
, ==, 1);
1282 err
= reftable_iterator_next_ref(&it
, &rec
);
1284 check_str(rec
.refname
, "refs/heads/branch-0000");
1285 err
= reftable_iterator_next_ref(&it
, &rec
);
1287 check_str(rec
.refname
, "refs/heads/branch-0001");
1288 err
= reftable_iterator_next_ref(&it
, &rec
);
1289 check_int(err
, >, 0);
1291 reftable_ref_record_release(&rec
);
1292 reftable_iterator_destroy(&it
);
1293 reftable_stack_destroy(st1
);
1294 reftable_stack_destroy(st2
);
1298 static void t_reftable_stack_reload_with_missing_table(void)
1300 struct reftable_write_options opts
= { 0 };
1301 struct reftable_stack
*st
= NULL
;
1302 struct reftable_ref_record rec
= { 0 };
1303 struct reftable_iterator it
= { 0 };
1304 struct strbuf table_path
= STRBUF_INIT
, content
= STRBUF_INIT
;
1305 char *dir
= get_tmp_dir(__LINE__
);
1308 /* Create a first stack and set up an iterator for it. */
1309 err
= reftable_new_stack(&st
, dir
, &opts
);
1311 write_n_ref_tables(st
, 2);
1312 check_int(st
->merged
->readers_len
, ==, 2);
1313 reftable_stack_init_ref_iterator(st
, &it
);
1314 err
= reftable_iterator_seek_ref(&it
, "");
1318 * Update the tables.list file with some garbage data, while reusing
1319 * our old readers. This should trigger a partial reload of the stack,
1320 * where we try to reuse our old readers.
1322 strbuf_addf(&content
, "%s\n", st
->readers
[0]->name
);
1323 strbuf_addf(&content
, "%s\n", st
->readers
[1]->name
);
1324 strbuf_addstr(&content
, "garbage\n");
1325 strbuf_addf(&table_path
, "%s.lock", st
->list_file
);
1326 write_file_buf(table_path
.buf
, content
.buf
, content
.len
);
1327 err
= rename(table_path
.buf
, st
->list_file
);
1330 err
= reftable_stack_reload(st
);
1331 check_int(err
, ==, -4);
1332 check_int(st
->merged
->readers_len
, ==, 2);
1335 * Even though the reload has failed, we should be able to continue
1336 * using the iterator.
1338 err
= reftable_iterator_next_ref(&it
, &rec
);
1340 check_str(rec
.refname
, "refs/heads/branch-0000");
1341 err
= reftable_iterator_next_ref(&it
, &rec
);
1343 check_str(rec
.refname
, "refs/heads/branch-0001");
1344 err
= reftable_iterator_next_ref(&it
, &rec
);
1345 check_int(err
, >, 0);
1347 reftable_ref_record_release(&rec
);
1348 reftable_iterator_destroy(&it
);
1349 reftable_stack_destroy(st
);
1350 strbuf_release(&table_path
);
1351 strbuf_release(&content
);
1355 int cmd_main(int argc UNUSED
, const char *argv
[] UNUSED
)
1357 TEST(t_empty_add(), "empty addition to stack");
1358 TEST(t_read_file(), "read_lines works");
1359 TEST(t_reflog_expire(), "expire reflog entries");
1360 TEST(t_reftable_stack_add(), "add multiple refs and logs to stack");
1361 TEST(t_reftable_stack_add_one(), "add a single ref record to stack");
1362 TEST(t_reftable_stack_add_performs_auto_compaction(), "addition to stack triggers auto-compaction");
1363 TEST(t_reftable_stack_auto_compaction(), "stack must form geometric sequence after compaction");
1364 TEST(t_reftable_stack_auto_compaction_factor(), "auto-compaction with non-default geometric factor");
1365 TEST(t_reftable_stack_auto_compaction_fails_gracefully(), "failure on auto-compaction");
1366 TEST(t_reftable_stack_auto_compaction_with_locked_tables(), "auto compaction with locked tables");
1367 TEST(t_reftable_stack_compaction_concurrent(), "compaction with concurrent stack");
1368 TEST(t_reftable_stack_compaction_concurrent_clean(), "compaction with unclean stack shutdown");
1369 TEST(t_reftable_stack_compaction_with_locked_tables(), "compaction with locked tables");
1370 TEST(t_reftable_stack_hash_id(), "read stack with wrong hash ID");
1371 TEST(t_reftable_stack_iterator(), "log and ref iterator for reftable stack");
1372 TEST(t_reftable_stack_lock_failure(), "stack addition with lockfile failure");
1373 TEST(t_reftable_stack_log_normalize(), "log messages should be normalized");
1374 TEST(t_reftable_stack_read_across_reload(), "stack iterators work across reloads");
1375 TEST(t_reftable_stack_reload_with_missing_table(), "stack iteration with garbage tables");
1376 TEST(t_reftable_stack_tombstone(), "'tombstone' refs in stack");
1377 TEST(t_reftable_stack_transaction_api(), "update transaction to stack");
1378 TEST(t_reftable_stack_transaction_with_reload(), "transaction with reload");
1379 TEST(t_reftable_stack_transaction_api_performs_auto_compaction(), "update transaction triggers auto-compaction");
1380 TEST(t_reftable_stack_update_index_check(), "update transactions with equal update indices");
1381 TEST(t_reftable_stack_uptodate(), "stack must be reloaded before ref update");
1382 TEST(t_suggest_compaction_segment(), "suggest_compaction_segment with basic input");
1383 TEST(t_suggest_compaction_segment_nothing(), "suggest_compaction_segment with pre-compacted input");