// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
#include "../disk-io.h"
#include "../btrfs_inode.h"

#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)

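/*
 * Walk every folio backing [start, end] and apply the PROCESS_* actions
 * selected by @flags.  The return value counts the folios that failed the
 * PROCESS_TEST_LOCKED check, so 0 means all pages in the range were locked
 * as expected.
 */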
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct folio_batch fbatch;
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	int i;
	int count = 0;
	int loops = 0;

	folio_batch_init(&fbatch);

	while (index <= end_index) {
		ret = filemap_get_folios_contig(inode->i_mapping, &index,
				end_index, &fbatch);
		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			if (flags & PROCESS_TEST_LOCKED &&
			    !folio_test_locked(folio))
				count++;
			if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
				folio_unlock(folio);
			if (flags & PROCESS_RELEASE)
				folio_put(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
		"stuck in a loop, start %llu, end %llu, ret %d\n",
			       start, end, ret);
			break;
		}
	}

	return count;
}

#define STATE_FLAG_STR_LEN			256

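/*
 * Append "NAME" (prefixed with '|' unless it is the first flag printed) to
 * @dest when the EXTENT_##name bit is set in @state.
 */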
#define PRINT_ONE_FLAG(state, dest, cur, name)				\
({									\
	if (state->state & EXTENT_##name)				\
		cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur,	\
				 "%s" #name, cur == 0 ? "" : "|");	\
})

static void extent_flag_to_str(const struct extent_state *state, char *dest)
{
	int cur = 0;

	dest[0] = 0;
	PRINT_ONE_FLAG(state, dest, cur, DIRTY);
	PRINT_ONE_FLAG(state, dest, cur, UPTODATE);
	PRINT_ONE_FLAG(state, dest, cur, LOCKED);
	PRINT_ONE_FLAG(state, dest, cur, NEW);
	PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
	PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
	PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
	PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
	PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
	PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
	PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
	PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
}

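/* Dump all extent states in the tree when a test fails, for debugging. */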
static void dump_extent_io_tree(const struct extent_io_tree *tree)
{
	struct rb_node *node;
	char flags_str[STATE_FLAG_STR_LEN];

	node = rb_first(&tree->state);
	test_msg("io tree content:");
	while (node) {
		struct extent_state *state;

		state = rb_entry(node, struct extent_state, rb_node);
		extent_flag_to_str(state, flags_str);
		test_msg("  start=%llu len=%llu flags=%s", state->start,
			 state->end + 1 - state->start, flags_str);
		node = rb_next(node);
	}
}

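/*
 * Test find_lock_delalloc_range() on a dummy inode: dirty and pin two
 * maximum-sized file extents worth of pages, then verify the ranges returned
 * for searches starting inside, before and after the delalloc extents.
 */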
static int test_find_delalloc(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *root = NULL;
	struct inode *inode = NULL;
	struct extent_io_tree *tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	/* In this test we need at least 2 file extents at their maximum size */
	u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
	u64 total_dirty = 2 * max_bytes;
	u64 start, end, test_start;
	bool found;
	int ret = -EINVAL;

	test_msg("running find delalloc tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	root = btrfs_alloc_dummy_root(fs_info);
	if (IS_ERR(root)) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = PTR_ERR(root);
		goto out;
	}

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_std_err(TEST_ALLOC_INODE);
		ret = -ENOMEM;
		goto out;
	}
	tmp = &BTRFS_I(inode)->io_tree;
	BTRFS_I(inode)->root = root;

	/*
	 * Passing NULL as we don't have fs_info but tracepoints are not used
	 * at this point
	 */
	extent_io_tree_init(NULL, tmp, IO_TREE_SELFTEST);

	/*
	 * First go through and create and mark all of our pages dirty, we pin
	 * everything to make sure our pages don't get evicted and screw up our
	 * test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_err("failed to allocate test page");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/* Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL);
	start = 0;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("should have found at least one delalloc");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_err("expected start 0 end %u, got start %llu end %llu",
			 sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("couldn't find delalloc in our range");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_err("expected start %llu end %llu, got start %llu, end %llu",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("there were unlocked pages in the range");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_err("couldn't find the locked page");
		goto out_bits;
	}
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (found) {
		test_err("found range when we shouldn't have");
		goto out_bits;
	}
	if (end != test_start + PAGE_SIZE - 1) {
		test_err("did not return the proper end offset");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	unlock_extent(tmp, start, end, NULL);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_err("couldn't find our page");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = start + PAGE_SIZE - 1;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, page_folio(locked_page), &start,
					 &end);
	if (!found) {
		test_err("didn't find our range");
		goto out_bits;
	}
	if (start != test_start && end != test_start + PAGE_SIZE - 1) {
		test_err("expected start %llu end %llu, got start %llu end %llu",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_err("pages in range were not all locked");
		goto out_bits;
	}
	ret = 0;
out_bits:
	if (ret)
		dump_extent_io_tree(tmp);
	clear_extent_bits(tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	btrfs_free_dummy_root(root);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

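/*
 * Verify that every bit of @eb matches the reference @bitmap, checking both
 * the "start byte 0" and the "byte + bit offset" forms of
 * extent_buffer_test_bit().
 */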
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i;

	for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i, i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			u8 has;
			u8 expect;

			read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
			expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));

			test_err(
		"bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
				 i / BITS_PER_BYTE, i % BITS_PER_BYTE,
				 i / BITS_PER_BYTE, has, expect);
			return -EINVAL;
		}
	}
	return 0;
}

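/*
 * The two helpers below mirror each set/clear on both the reference bitmap
 * and the extent buffer, then verify the two representations still agree.
 */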
static int test_bitmap_set(const char *name, unsigned long *bitmap,
			   struct extent_buffer *eb,
			   unsigned long byte_start, unsigned long bit_start,
			   unsigned long bit_len)
{
	int ret;

	bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}

static int test_bitmap_clear(const char *name, unsigned long *bitmap,
			     struct extent_buffer *eb,
			     unsigned long byte_start, unsigned long bit_start,
			     unsigned long bit_len)
{
	int ret;

	bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
	extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
	ret = check_eb_bitmap(bitmap, eb);
	if (ret < 0)
		test_err("%s test failed", name);
	return ret;
}

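/*
 * Run bitmap patterns of increasing difficulty: whole buffer, within a
 * single byte, across byte boundaries, across page boundaries (when the
 * buffer is larger than one page) and finally a pseudo-random pattern.
 */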
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
{
	unsigned long i, j;
	unsigned long byte_len = eb->len;
	u32 x;
	int ret;

	ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
	if (ret < 0)
		return ret;

	ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
	if (ret < 0)
		return ret;

	ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
	if (ret < 0)
		return ret;

	/* Straddling pages test */
	if (byte_len > PAGE_SIZE) {
		ret = test_bitmap_set("cross page set", bitmap, eb,
				      PAGE_SIZE - sizeof(long) / 2, 0,
				      sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
				      byte_len * BITS_PER_BYTE);
		if (ret < 0)
			return ret;

		ret = test_bitmap_clear("cross page clear", bitmap, eb,
					PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (ret < 0)
			return ret;
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
				byte_len * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
		/* Classic Numerical Recipes LCG: x = 1664525 * x + 1013904223 */
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb);
	if (ret) {
		test_err("random bit pattern failed");
		return ret;
	}

	return 0;
}

static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long *bitmap = NULL;
	struct extent_buffer *eb = NULL;
	int ret;

	test_msg("running extent buffer bitmap tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	bitmap = kmalloc(nodesize, GFP_KERNEL);
	if (!bitmap) {
		test_err("couldn't allocate test bitmap");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
	if (ret)
		goto out;

	free_extent_buffer(eb);

	/*
	 * Test again for case where the tree block is sectorsize aligned but
	 * not nodesize aligned.
	 */
	eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = -ENOMEM;
		goto out;
	}

	ret = __test_eb_bitmaps(bitmap, eb);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

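/*
 * Layout used below, with CHUNK_TRIMMED | CHUNK_ALLOCATED set on the marked
 * ranges (the 64M-72M range gets CHUNK_ALLOCATED only):
 *
 *   0      1M     4M       32M     64M     72M
 *   |-hole-|-set--|--hole--|--set--|--set--|-hole ...
 */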
static int test_find_first_clear_extent_bit(void)
{
	struct extent_io_tree tree;
	u64 start, end;
	int ret = -EINVAL;

	test_msg("running find_first_clear_extent_bit test");

	extent_io_tree_init(NULL, &tree, IO_TREE_SELFTEST);

	/* Test correct handling of empty tree */
	find_first_clear_extent_bit(&tree, 0, &start, &end, CHUNK_TRIMMED);
	if (start != 0 || end != -1) {
		test_err(
	"error getting a range from completely empty tree: start %llu end %llu",
			 start, end);
		goto out;
	}
	/*
	 * Set 1M-4M alloc/discard and 32M-64M thus leaving a hole between
	 * 4M-32M
	 */
	set_extent_bit(&tree, SZ_1M, SZ_4M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	find_first_clear_extent_bit(&tree, SZ_512K, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != 0 || end != SZ_1M - 1) {
		test_err("error finding beginning range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/* Now add 32M-64M so that we have a hole between 4M-32M */
	set_extent_bit(&tree, SZ_32M, SZ_64M - 1,
		       CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL);

	/*
	 * Request first hole starting at 12M, we should get 4M-32M
	 */
	find_first_clear_extent_bit(&tree, 12 * SZ_1M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding trimmed range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search in the middle of allocated range, should get the next one
	 * available, which happens to be unallocated -> 4M-32M
	 */
	find_first_clear_extent_bit(&tree, SZ_2M, &start, &end,
				    CHUNK_TRIMMED | CHUNK_ALLOCATED);

	if (start != SZ_4M || end != SZ_32M - 1) {
		test_err("error finding next unalloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag
	 * being unset in this range, we should get the entry in range 64M-72M
	 */
	set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL);
	find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end,
				    CHUNK_TRIMMED);

	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding exact range: start %llu end %llu",
			 start, end);
		goto out;
	}

	find_first_clear_extent_bit(&tree, SZ_64M - SZ_8M, &start, &end,
				    CHUNK_TRIMMED);

	/*
	 * Search in the middle of set range whose immediate neighbour doesn't
	 * have the bits set so it must be returned
	 */
	if (start != SZ_64M || end != SZ_64M + SZ_8M - 1) {
		test_err("error finding next alloc range: start %llu end %llu",
			 start, end);
		goto out;
	}

	/*
	 * Search beyond any known range, shall return after last known range
	 * and end should be -1
	 */
	find_first_clear_extent_bit(&tree, -1, &start, &end, CHUNK_TRIMMED);
	if (start != SZ_64M + SZ_8M || end != -1) {
		test_err(
	"error handling beyond end of range search: start %llu end %llu",
			 start, end);
		goto out;
	}

	ret = 0;
out:
	if (ret)
		dump_extent_io_tree(&tree);
	clear_extent_bits(&tree, 0, (u64)-1, CHUNK_TRIMMED | CHUNK_ALLOCATED);

	return ret;
}

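/* On mismatch, report the first byte where @eb and @memory differ. */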
static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
					const char *test_name)
{
	for (int i = 0; i < eb->len; i++) {
		struct page *page = folio_page(eb->folios[i >> PAGE_SHIFT], 0);
		void *addr = page_address(page) + offset_in_page(i);

		if (memcmp(addr, memory + i, 1) != 0) {
			test_err("%s failed", test_name);
			test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
				 i, *(u8 *)addr, *(u8 *)(memory + i));
			break;
		}
	}
}

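/*
 * Compare @eb page by page against the plain memory mirror, returning
 * -EUCLEAN on the first page that differs.
 */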
static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
				const char *test_name)
{
	for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
		void *eb_addr = folio_address(eb->folios[i]);

		if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
			dump_eb_and_memory_contents(eb, memory, test_name);
			return -EUCLEAN;
		}
	}
	return 0;
}

/*
 * Init both memory and extent buffer contents to the same randomly generated
 * contents.
 */
static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
{
	get_random_bytes(memory, eb->len);
	write_extent_buffer(eb, memory, 0, eb->len);
}

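/*
 * Each memcpy_extent_buffer()/memmove_extent_buffer() below is mirrored by
 * the equivalent memcpy()/memmove() on plain memory and the two copies are
 * then compared.  Offsets are chosen to cover same-page, overlapping and,
 * for nodesize > PAGE_SIZE, cross-page cases.
 */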
static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct extent_buffer *eb = NULL;
	void *memory = NULL;
	int ret;

	test_msg("running extent buffer memory operation tests");

	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	memory = kvzalloc(nodesize, GFP_KERNEL);
	if (!memory) {
		test_err("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize);
	if (!eb) {
		test_std_err(TEST_ALLOC_EXTENT_BUFFER);
		ret = -ENOMEM;
		goto out;
	}

	init_eb_and_memory(eb, memory);
	ret = verify_eb_and_memory(eb, memory, "full eb write");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 16, 16);
	memcpy_extent_buffer(eb, 0, 16, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 16);
	memcpy_extent_buffer(eb, 0, 2048, 16);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
	if (ret < 0)
		goto out;

	memcpy(memory, memory + 2048, 2048);
	memcpy_extent_buffer(eb, 0, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 256, 512);
	memmove_extent_buffer(eb, 512, 256, 512);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1");
	if (ret < 0)
		goto out;

	memmove(memory + 2048, memory + 512, 2048);
	memmove_extent_buffer(eb, 2048, 512, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2");
	if (ret < 0)
		goto out;

	memmove(memory + 512, memory + 2048, 2048);
	memmove_extent_buffer(eb, 512, 2048, 2048);
	ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3");
	if (ret < 0)
		goto out;

	if (nodesize > PAGE_SIZE) {
		memcpy(memory, memory + 4096 - 128, 256);
		memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
		memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
		memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1");
		if (ret < 0)
			goto out;

		memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
		memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
		ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2");
		if (ret < 0)
			goto out;
	}
out:
	free_extent_buffer(eb);
	kvfree(memory);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}

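/* Entry point: run all extent I/O tests, stopping at the first failure. */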
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("running extent I/O tests");

	ret = test_find_delalloc(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_find_first_clear_extent_bit();
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
	if (ret)
		goto out;

	ret = test_eb_mem_ops(sectorsize, nodesize);
out:
	return ret;
}