// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Oracle. All rights reserved.
 */

#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../block-group.h"
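
/*
 * Release every extent map left in @em_tree so each test case starts and
 * ends with an empty tree.
 */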
static void free_extent_map_tree(struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	struct rb_node *node;

	while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
		node = rb_first_cached(&em_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		remove_extent_mapping(em_tree, em);
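
		/*
		 * After removal the map should be down to the single reference
		 * the tree held; anything else means a test case leaked a
		 * reference, so report it and reset the count so the map can
		 * still be freed below.
		 */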
#ifdef CONFIG_BTRFS_DEBUG
		if (refcount_read(&em->refs) != 1) {
			test_err(
"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d",
				 em->start, em->len, em->block_start,
				 em->block_len, refcount_read(&em->refs));

			refcount_set(&em->refs, 1);
		}
#endif
		free_extent_map(em);
	}
}
/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet, there is a file
 * extent [0, 16K), followed by another file extent [16K, 20K), two dio reads
 * are entering btrfs_get_extent() concurrently, t1 is reading [8K, 16K), t2 is
 * reading [0, 8K).
 *
 *     t1                            t2
 *  btrfs_get_extent()              btrfs_get_extent()
 *    -> lookup_extent_mapping()      -> lookup_extent_mapping()
 *    -> add_extent_mapping(0, 16K)
 *    -> return em
 *                                    -> add_extent_mapping(0, 16K)
 *                                       # handle -EEXIST
 */
static int test_case_1(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	u64 start = 0;
	u64 len = SZ_8K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}
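
	/* Add [0, 16K) */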
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 16K)");
		goto out;
	}
	free_extent_map(em);
	/* Add [16K, 20K) following [0, 16K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_16K;
	em->len = SZ_4K;
	em->block_start = SZ_32K; /* avoid merging */
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [16K, 20K)");
		goto out;
	}
	free_extent_map(em);
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	/* Add [0, 8K), should return [0, 16K) instead. */
	em->start = start;
	em->len = len;
	em->block_start = start;
	em->block_len = len;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
		goto out;
	}
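
	/*
	 * Since [0, 16K) was already cached, asking for [0, 8K) must hand back
	 * the existing extent map rather than insert a new one.
	 */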
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_16K ||
	     em->block_start != 0 || em->block_len != SZ_16K)) {
		test_err(
"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}
/*
 * Test scenario:
 *
 * Reading an inline extent ends up with EEXIST, i.e. read an inline extent,
 * discard the page cache and read it again.
 */
static int test_case_2(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}
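
	/*
	 * Add [0, 1K) as an inline extent: inline extent maps carry
	 * block_start == EXTENT_MAP_INLINE and block_len == (u64)-1.
	 */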
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 1K)");
		goto out;
	}
	free_extent_map(em);
	/* Add [4K, 8K) following [0, 1K) */
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}

	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
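
	/*
	 * Pretend the page cache was dropped and the inline extent is being
	 * read again: add [0, 1K) a second time and expect the cached em back.
	 */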
	em->start = 0;
	em->len = SZ_1K;
	em->block_start = EXTENT_MAP_INLINE;
	em->block_len = (u64)-1;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case2 [0 1K]: ret %d", ret);
		goto out;
	}
	if (em &&
	    (em->start != 0 || extent_map_end(em) != SZ_1K ||
	     em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1)) {
		test_err(
"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu",
			 ret, em->start, em->len, em->block_start,
			 em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}
static int __test_case_3(struct btrfs_fs_info *fs_info,
			 struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}
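
	/* t1's buffered write has already cached em [4K, 8K). */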
	em->start = SZ_4K;
	em->len = SZ_4K;
	em->block_start = SZ_4K;
	em->block_len = SZ_4K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [4K, 8K)");
		goto out;
	}
	free_extent_map(em);
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
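
	/* t2 loads the whole file extent [0, 16K), which overlaps [4K, 8K). */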
	em->start = 0;
	em->len = SZ_16K;
	em->block_start = 0;
	em->block_len = SZ_16K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case3 [0x%llx 0x%llx): ret %d",
			 start, start + len, ret);
		goto out;
	}
	/*
	 * Since bytes within em are contiguous, em->block_start is identical to
	 * em->start.
	 */
	if (em &&
	    (start < em->start || start + len > extent_map_end(em) ||
	     em->start != em->block_start || em->len != em->block_len)) {
		test_err(
"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, start + len, ret, em->start, em->len,
			 em->block_start, em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}
/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 16K), two jobs are running concurrently
 * against it, t1 is buffered writing to [4K, 8K) and t2 is doing dio
 * read from [0, 4K) or [8K, 12K) or [12K, 16K).
 *
 * t1 goes ahead of t2 and adds em [4K, 8K) into tree.
 *
 *         t1                       t2
 *  cow_file_range()              btrfs_get_extent()
 *                                 -> lookup_extent_mapping()
 *   -> add_extent_mapping()
 *                                 -> add_extent_mapping()
 */
static int test_case_3(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_3(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, SZ_8K);
	if (ret)
		return ret;
	ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K));

	return ret;
}
static int __test_case_4(struct btrfs_fs_info *fs_info,
			 struct extent_map_tree *em_tree, u64 start)
{
	struct extent_map *em;
	u64 len = SZ_4K;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}
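
	/* Add [0, 8K), the left half left behind after t1 split [0, 32K). */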
	em->start = 0;
	em->len = SZ_8K;
	em->block_start = 0;
	em->block_len = SZ_8K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [0, 8K)");
		goto out;
	}
	free_extent_map(em);
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
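
	/* Add [8K, 32K), the right half of the split extent. */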
	em->start = SZ_8K;
	em->len = 24 * SZ_1K;
	em->block_start = SZ_16K; /* avoid merging */
	em->block_len = 24 * SZ_1K;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	if (ret < 0) {
		test_err("cannot add extent range [8K, 32K)");
		goto out;
	}
	free_extent_map(em);
	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		ret = -ENOMEM;
		goto out;
	}
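
	/*
	 * t2 still holds the stale em [0, 32K) from before the split and tries
	 * to add it while only asking for [start, start + len).
	 */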
	em->start = 0;
	em->len = SZ_32K;
	em->block_start = 0;
	em->block_len = SZ_32K;
	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
	write_unlock(&em_tree->lock);
	if (ret) {
		test_err("case4 [0x%llx 0x%llx): ret %d",
			 start, start + len, ret);
		goto out;
	}
	if (em && (start < em->start || start + len > extent_map_end(em))) {
		test_err(
"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)",
			 start, len, ret, em->start, em->len, em->block_start,
			 em->block_len);
		ret = -EINVAL;
	}
	free_extent_map(em);
out:
	free_extent_map_tree(em_tree);

	return ret;
}
/*
 * Test scenario:
 *
 * Suppose that no extent map has been loaded into memory yet.
 * There is a file extent [0, 32K), two jobs are running concurrently
 * against it, t1 is doing dio write to [8K, 32K) and t2 is doing dio
 * read from [0, 4K) or [4K, 8K).
 *
 * t1 goes ahead of t2 and splits em [0, 32K) to em [0K, 8K) and [8K, 32K).
 *
 *         t1                                t2
 *  btrfs_get_blocks_direct()          btrfs_get_blocks_direct()
 *   -> btrfs_get_extent()              -> btrfs_get_extent()
 *       -> lookup_extent_mapping()
 *       -> add_extent_mapping()        -> lookup_extent_mapping()
 *          # load [0, 32K)
 *   -> btrfs_new_extent_direct()
 *       -> btrfs_drop_extent_cache()
 *          # split [0, 32K)
 *       -> add_extent_mapping()
 *          # add [8K, 32K)
 *                                      -> add_extent_mapping()
 *                                         # handle -EEXIST when adding
 *                                         # [0, 32K)
 */
static int test_case_4(struct btrfs_fs_info *fs_info,
		       struct extent_map_tree *em_tree)
{
	int ret;

	ret = __test_case_4(fs_info, em_tree, 0);
	if (ret)
		return ret;
	ret = __test_case_4(fs_info, em_tree, SZ_4K);

	return ret;
}
struct rmap_test_vector {
	u64 raid_type;
	u64 physical_start;
	u64 data_stripe_size;
	u64 num_data_stripes;
	u64 num_stripes;
	/* Assume we won't have more than 5 physical stripes */
	u64 data_stripe_phys_start[5];
	bool expected_mapped_addr;
	/* Physical to logical addresses */
	u64 mapped_logical[5];
};
static int test_rmap_block(struct btrfs_fs_info *fs_info,
			   struct rmap_test_vector *test)
{
	struct extent_map *em;
	struct map_lookup *map = NULL;
	u64 *logical = NULL;
	int i, out_ndaddrs, out_stripe_len;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}
	map = kmalloc(map_lookup_size(test->num_stripes), GFP_KERNEL);
	if (!map) {
		free_extent_map(em);
		test_std_err(TEST_ALLOC_EXTENT_MAP);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	/* Start at 4GiB logical address */
	em->start = SZ_4G;
	em->len = test->data_stripe_size * test->num_data_stripes;
	em->block_len = em->len;
	em->orig_block_len = test->data_stripe_size;
	em->map_lookup = map;

	map->num_stripes = test->num_stripes;
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->type = test->raid_type;
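
	/* Back each stripe with a dummy device and its physical start. */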
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = btrfs_alloc_dummy_device(fs_info);

		if (IS_ERR(dev)) {
			test_err("cannot allocate device");
			ret = PTR_ERR(dev);
			goto out_free;
		}
		map->stripes[i].dev = dev;
		map->stripes[i].physical = test->data_stripe_phys_start[i];
	}
	write_lock(&fs_info->mapping_tree.lock);
	ret = add_extent_mapping(&fs_info->mapping_tree, em, 0);
	write_unlock(&fs_info->mapping_tree.lock);
	if (ret) {
		test_err("error adding block group mapping to mapping tree");
		goto out_free;
	}
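
	/* Reverse-map the physical address of super block copy 1. */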
	ret = btrfs_rmap_block(fs_info, em->start, btrfs_sb_offset(1),
			       &logical, &out_ndaddrs, &out_stripe_len);
	if (ret || (out_ndaddrs == 0 && test->expected_mapped_addr)) {
		test_err("didn't rmap anything but expected %d",
			 test->expected_mapped_addr);
		goto out;
	}
	if (out_stripe_len != BTRFS_STRIPE_LEN) {
		test_err("calculated stripe length doesn't match");
		goto out;
	}

	if (out_ndaddrs != test->expected_mapped_addr) {
		for (i = 0; i < out_ndaddrs; i++)
			test_msg("mapped %llu", logical[i]);
		test_err("unexpected number of mapped addresses: %d", out_ndaddrs);
		goto out;
	}

	for (i = 0; i < out_ndaddrs; i++) {
		if (logical[i] != test->mapped_logical[i]) {
			test_err("unexpected logical address mapped");
			goto out;
		}
	}
	ret = 0;
out:
	write_lock(&fs_info->mapping_tree.lock);
	remove_extent_mapping(&fs_info->mapping_tree, em);
	write_unlock(&fs_info->mapping_tree.lock);
	/* Drop the reference the mapping tree took in add_extent_mapping() */
	free_extent_map(em);
out_free:
	/* Drop our own reference; this also frees the attached map_lookup */
	free_extent_map(em);
	kfree(logical);
	return ret;
}
int btrfs_test_extent_map(void)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct extent_map_tree *em_tree;
	int ret = 0, i;
	struct rmap_test_vector rmap_tests[] = {
		{
			/*
			 * Test that a chunk with 2 data stripes, one of which
			 * intersects the physical address of the super block,
			 * is correctly recognised.
			 */
			.raid_type = BTRFS_BLOCK_GROUP_RAID1,
			.physical_start = SZ_64M - SZ_4M,
			.data_stripe_size = SZ_256M,
			.num_data_stripes = 2,
			.num_stripes = 2,
			.data_stripe_phys_start =
				{SZ_64M - SZ_4M, SZ_64M - SZ_4M + SZ_256M},
			.expected_mapped_addr = true,
			.mapped_logical = {SZ_4G + SZ_4M}
		},
		{
			/*
			 * Test that out-of-range physical addresses are
			 * ignored.
			 */

			/* SINGLE chunk type */
			.raid_type = 0,
			.physical_start = SZ_4G,
			.data_stripe_size = SZ_256M,
			.num_data_stripes = 1,
			.num_stripes = 1,
			.data_stripe_phys_start = {SZ_256M},
			.expected_mapped_addr = false,
			.mapped_logical = {0}
		}
	};

	test_msg("running extent_map tests");

	/*
	 * Note: the fs_info is not set up completely, we only need
	 * fs_info::fsid for the tracepoint.
	 */
	fs_info = btrfs_alloc_dummy_fs_info(PAGE_SIZE, PAGE_SIZE);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
	if (!em_tree) {
		ret = -ENOMEM;
		goto out;
	}

	extent_map_tree_init(em_tree);
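
	/* The same tree is reused; each test case drains it before returning. */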
	ret = test_case_1(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_2(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_3(fs_info, em_tree);
	if (ret)
		goto out;
	ret = test_case_4(fs_info, em_tree);
	if (ret)
		goto out;
	test_msg("running rmap tests");
	for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
		ret = test_rmap_block(fs_info, &rmap_tests[i]);
		if (ret)
			goto out;
	}

out:
	kfree(em_tree);
	btrfs_free_dummy_fs_info(fs_info);

	return ret;
}