// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "snapshot.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"

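/*
 * Unit and performance tests for the btree layer. Tests are dispatched by
 * name through bch2_btree_perf_test() at the bottom of this file.
 */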
static void delete_test_keys(struct bch_fs *c)
{
	int ret;

	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
	BUG_ON(ret);

	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
	BUG_ON(ret);
}

/* unit tests */

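/*
 * Insert a key, then delete at the same position twice through one iterator,
 * committing after each step; the second delete exercises deleting a key
 * that is no longer present.
 */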
static int test_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_intent);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	pr_info("deleting once");
	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (first)");
	if (ret)
		goto err;

	pr_info("deleting twice");
	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (second)");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

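/*
 * As test_delete(), but flush all journal pins after the insert so that the
 * delete runs against a key that has been written out to the btree, not one
 * still sitting in the journal.
 */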
static int test_delete_written(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_intent);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	bch2_trans_unlock(trans);
	bch2_journal_flush_all_pins(&c->journal);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

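/*
 * Insert nr keys at consecutive offsets, then verify that forward and
 * reverse iteration visits each of them exactly once, in order.
 */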
static int test_iterate(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i;
		ck.k.p.snapshot	= U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k, ({
			BUG_ON(k.k->p.offset != i++);
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = bch2_trans_run(c,
		for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
					   SPOS(0, U64_MAX, U32_MAX), 0, k, ({
			BUG_ON(k.k->p.offset != --i);
			0;
		})));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		return ret;

	BUG_ON(i);
	return 0;
}

static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test extents");

	for (i = 0; i < nr; i += 8) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i + 8;
		ck.k.p.snapshot	= U32_MAX;
		ck.k.size	= 8;

		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k, ({
			BUG_ON(bkey_start_offset(k.k) != i);
			i = k.k->p.offset;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = bch2_trans_run(c,
		for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
					   SPOS(0, U64_MAX, U32_MAX), 0, k, ({
			BUG_ON(k.k->p.offset != i);
			i = bkey_start_offset(k.k);
			0;
		})));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		return ret;

	BUG_ON(i);
	return 0;
}

static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i * 2;
		ck.k.p.snapshot	= U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k, ({
			BUG_ON(k.k->p.offset != i);
			i += 2;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr * 2);

	pr_info("iterating forwards by slots");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					BTREE_ITER_slots, k, ({
			if (i >= nr * 2)
				break;

			BUG_ON(k.k->p.offset != i);
			BUG_ON(bkey_deleted(k.k) != (i & 1));

			i++;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards by slots");
	return ret;
}

static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i += 16) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i + 16;
		ck.k.p.snapshot	= U32_MAX;
		ck.k.size	= 8;

		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			return ret;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k, ({
			BUG_ON(bkey_start_offset(k.k) != i + 8);
			BUG_ON(k.k->size != 8);
			i += 16;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating forwards by slots");
	i = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					BTREE_ITER_slots, k, ({
			if (i == nr)
				break;
			BUG_ON(bkey_deleted(k.k) != !(i % 16));

			BUG_ON(bkey_start_offset(k.k) != i);
			BUG_ON(k.k->size != 8);
			i = k.k->p.offset;
			0;
		})));
	bch_err_msg(c, ret, "error iterating forwards by slots");
	return ret;
}

/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
static int test_peek_end(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return 0;
}

static int test_peek_end_extents(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(0, 0, U32_MAX), 0);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return 0;
}

/* extent unit tests */

static u64 test_version;

static int insert_test_extent(struct bch_fs *c,
			      u64 start, u64 end)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.offset	= end;
	k.k_i.k.p.snapshot	= U32_MAX;
	k.k_i.k.size		= end - start;
	k.k_i.k.bversion.lo	= test_version++;

	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0);
	bch_err_fn(c, ret);
	return ret;
}

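/*
 * Insert extent e1, then insert e2 overlapping it, exercising the extent
 * overwrite path; the wrappers below cover front, back, middle and full
 * overlaps.
 */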
static int __test_extent_overwrite(struct bch_fs *c,
				   u64 e1_start, u64 e1_end,
				   u64 e2_start, u64 e2_end)
{
	int ret;

	ret = insert_test_extent(c, e1_start, e1_end) ?:
	      insert_test_extent(c, e2_start, e2_end);

	delete_test_keys(c);
	return ret;
}

static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
		__test_extent_overwrite(c, 8, 64, 0, 32);
}

static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
		__test_extent_overwrite(c, 0, 64, 32, 72);
}

static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
	return __test_extent_overwrite(c, 0, 64, 32, 40);
}

static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
		__test_extent_overwrite(c, 32, 64, 32,  64) ?:
		__test_extent_overwrite(c, 32, 64, 32, 128);
}

static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.inode		= inum;
	k.k_i.k.p.offset	= start + len;
	k.k_i.k.p.snapshot	= snapid;
	k.k_i.k.size		= len;

	ret = bch2_trans_commit_do(c, NULL, NULL, 0,
		bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
					    BTREE_UPDATE_internal_snapshot_node));
	bch_err_fn(c, ret);
	return ret;
}

static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
{
	return  insert_test_overlapping_extent(c, inum,  0, 16, U32_MAX - 2) ?: /* overwrite entire */
		insert_test_overlapping_extent(c, inum,  2,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum,  4,  4, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 32,  8, U32_MAX - 2) ?: /* overwrite front/back */
		insert_test_overlapping_extent(c, inum, 36,  8, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 60,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum, 64,  8, U32_MAX);
}

/* snapshot unit tests */

/* Test skipping over keys in unrelated snapshots: */
static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie cookie;
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = snapid_hi;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
	if (ret)
		return ret;

	trans = bch2_trans_get(c);
	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, snapid_lo), 0);
	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));

	BUG_ON(k.k->p.snapshot != U32_MAX);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

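/*
 * Create two sibling snapshot nodes under U32_MAX, insert a key in the newer
 * one, and verify that iterating from the older one filters it out and
 * returns only the ancestor's key.
 */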
static int test_snapshots(struct bch_fs *c, u64 nr)
{
	struct bkey_i_cookie cookie;
	u32 snapids[2];
	u32 snapid_subvols[2] = { 1, 1 };
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = U32_MAX;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0);
	if (ret)
		return ret;

	ret = bch2_trans_commit_do(c, NULL, NULL, 0,
		      bch2_snapshot_node_create(trans, U32_MAX,
						snapids,
						snapid_subvols,
						2));
	if (ret)
		return ret;

	if (snapids[0] > snapids[1])
		swap(snapids[0], snapids[1]);

	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
	bch_err_msg(c, ret, "from test_snapshot_filter");
	return ret;
}

/* perf tests */

static u64 test_rand(void)
{
	u64 v;

	get_random_bytes(&v, sizeof(v));
	return v;
}

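/* Insert nr cookies at uniformly random offsets, one per transaction commit. */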
static int rand_insert(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k;
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		bkey_cookie_init(&k.k_i);
		k.k.p.offset	= test_rand();
		k.k.p.snapshot	= U32_MAX;

		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}

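/* As rand_insert(), but batching eight inserts into each transaction commit. */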
static int rand_insert_multi(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k[8];
	int ret = 0;
	unsigned j;
	u64 i;

	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
		for (j = 0; j < ARRAY_SIZE(k); j++) {
			bkey_cookie_init(&k[j].k_i);
			k[j].k.p.offset		= test_rand();
			k[j].k.p.snapshot	= U32_MAX;
		}

		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}

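/* nr random point lookups through a single reused iterator. */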
static int rand_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;
	u64 i;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));

		lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
		ret = bkey_err(k);
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

static int rand_mixed_trans(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct bkey_i_cookie *cookie,
			    u64 i, u64 pos)
{
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));

	k = bch2_btree_iter_peek(iter);
	ret = bkey_err(k);
	bch_err_msg(trans->c, ret, "lookup error");
	if (ret)
		return ret;

	if (!(i & 3) && k.k) {
		bkey_cookie_init(&cookie->k_i);
		cookie->k.p = iter->pos;
		ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
	}

	return ret;
}

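/*
 * Mixed workload: every iteration does a random lookup, and every fourth
 * iteration also overwrites the key it found (see rand_mixed_trans()).
 */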
static int rand_mixed(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie cookie;
	int ret = 0;
	u64 i, rand;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		rand = test_rand();
		ret = commit_do(trans, NULL, NULL, 0,
			rand_mixed_trans(trans, &iter, &cookie, i, rand));
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}

static int __do_delete(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!k.k)
		goto err;

	ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

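/* Delete the first key at or after each of nr random positions, if any. */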
static int rand_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		struct bpos pos = SPOS(0, test_rand(), U32_MAX);

		ret = commit_do(trans, NULL, NULL, 0,
			__do_delete(trans, pos));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}

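/*
 * Sequential workloads: seq_insert() fills the first nr slots via a slots
 * iterator, and the tests after it walk, overwrite and delete those keys.
 */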
static int seq_insert(struct bch_fs *c, u64 nr)
{
	struct bkey_i_cookie insert;

	bkey_cookie_init(&insert.k_i);

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_slots|BTREE_ITER_intent, k,
					NULL, NULL, 0, ({
			if (iter.pos.offset >= nr)
				break;
			insert.k.p = iter.pos;
			bch2_trans_update(trans, &iter, &insert.k_i, 0);
		})));
}

static int seq_lookup(struct bch_fs *c, u64 nr)
{
	return bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					0, k,
			0));
}

static int seq_overwrite(struct bch_fs *c, u64 nr)
{
	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_intent, k,
					NULL, NULL, 0, ({
			struct bkey_i_cookie u;

			bkey_reassemble(&u.k_i, k);
			bch2_trans_update(trans, &iter, &u.k_i, 0);
		})));
}

static int seq_delete(struct bch_fs *c, u64 nr)
{
	return bch2_btree_delete_range(c, BTREE_ID_xattrs,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
}

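/*
 * Perf test harness: each test runs on one or more kthreads with the work
 * divided evenly between them; the last thread to become ready starts the
 * clock, and the last thread to finish stops it.
 */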
typedef int (*perf_test_fn)(struct bch_fs *, u64);

struct test_job {
	struct bch_fs			*c;
	u64				nr;
	unsigned			nr_threads;
	perf_test_fn			fn;

	atomic_t			ready;
	wait_queue_head_t		ready_wait;

	atomic_t			done;
	struct completion		done_completion;

	u64				start;
	u64				finish;
	int				ret;
};

static int btree_perf_test_thread(void *data)
{
	struct test_job *j = data;
	int ret;

	if (atomic_dec_and_test(&j->ready)) {
		wake_up(&j->ready_wait);
		j->start = sched_clock();
	} else {
		wait_event(j->ready_wait, !atomic_read(&j->ready));
	}

	ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
	if (ret) {
		bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
		j->ret = ret;
	}

	if (atomic_dec_and_test(&j->done)) {
		j->finish = sched_clock();
		complete(&j->done_completion);
	}

	return 0;
}

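/* Look up a test by name, run it on nr_threads threads, report throughput. */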
int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
			 u64 nr, unsigned nr_threads)
{
	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
	char name_buf[20];
	struct printbuf nr_buf = PRINTBUF;
	struct printbuf per_sec_buf = PRINTBUF;
	unsigned i;
	u64 time;

	if (nr == 0 || nr_threads == 0) {
		pr_err("nr of iterations or threads is not allowed to be 0");
		return -EINVAL;
	}

	atomic_set(&j.ready, nr_threads);
	init_waitqueue_head(&j.ready_wait);

	atomic_set(&j.done, nr_threads);
	init_completion(&j.done_completion);

#define perf_test(_test)				\
	if (!strcmp(testname, #_test)) j.fn = _test

	perf_test(rand_insert);
	perf_test(rand_insert_multi);
	perf_test(rand_lookup);
	perf_test(rand_mixed);
	perf_test(rand_delete);

	perf_test(seq_insert);
	perf_test(seq_lookup);
	perf_test(seq_overwrite);
	perf_test(seq_delete);

	/* a unit test, not a perf test: */
	perf_test(test_delete);
	perf_test(test_delete_written);
	perf_test(test_iterate);
	perf_test(test_iterate_extents);
	perf_test(test_iterate_slots);
	perf_test(test_iterate_slots_extents);
	perf_test(test_peek_end);
	perf_test(test_peek_end_extents);

	perf_test(test_extent_overwrite_front);
	perf_test(test_extent_overwrite_back);
	perf_test(test_extent_overwrite_middle);
	perf_test(test_extent_overwrite_all);
	perf_test(test_extent_create_overlapping);

	perf_test(test_snapshots);

	if (!j.fn) {
		pr_err("unknown test %s", testname);
		return -EINVAL;
	}

	//pr_info("running test %s:", testname);

	if (nr_threads == 1)
		btree_perf_test_thread(&j);
	else
		for (i = 0; i < nr_threads; i++)
			kthread_run(btree_perf_test_thread, &j,
				    "bcachefs perf test[%u]", i);

	while (wait_for_completion_interruptible(&j.done_completion))
		;

	time = j.finish - j.start;

	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
	prt_human_readable_u64(&nr_buf, nr);
	prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
		name_buf, nr_buf.buf, nr_threads,
		div_u64(time, NSEC_PER_SEC),
		div_u64(time * nr_threads, nr),
		per_sec_buf.buf);
	printbuf_exit(&per_sec_buf);
	printbuf_exit(&nr_buf);
	return j.ret;
}

#endif /* CONFIG_BCACHEFS_TESTS */