// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test of ext4 multiblocks allocation.
 */

#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <linux/random.h>
struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just the place holders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};

struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};

struct mbt_ext4_super_block {
	struct ext4_super_block es;
	struct ext4_sb_info sbi;
	struct mbt_ctx mbt_ctx;
};

#define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
static struct inode *mbt_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmalloc(sizeof(struct ext4_inode_info), GFP_KERNEL);
	if (!ei)
		return NULL;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);

	return &ei->vfs_inode;
}
static void mbt_free_inode(struct inode *inode)
{
	kfree(EXT4_I(inode));
}
static const struct super_operations mbt_sops = {
	.alloc_inode	= mbt_alloc_inode,
	.free_inode	= mbt_free_inode,
};

static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}

static struct file_system_type mbt_fs_type = {
	.name			= "mballoc test",
	.kill_sb		= mbt_kill_sb,
};
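
/*
 * Prepare just enough in-memory state (a dummy bdev with a request_queue,
 * an empty s_inodes list and the cluster percpu counters) for ext4_mb_init()
 * to run against the fake super block.
 */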
static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend-> sbi->s_buddy_cache =
	 * new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}
static void mbt_mb_release(struct super_block *sb)
{
	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
	ext4_mb_release(sb);
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
}
static int mbt_set(struct super_block *sb, void *data)
{
	return 0;
}
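
/*
 * Allocate a fake ext4 super block with no backing device: the ext4_sb_info
 * and ext4_super_block embedded in mbt_ext4_super_block stand in for the
 * on-disk metadata.
 */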
static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;
	struct ext4_sb_info *sbi;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out;

	sbi = &fsb->sbi;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_deactivate;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = &fsb->es;
	sb->s_fs_info = sbi;

	up_write(&sb->s_umount);
	return sb;

out_deactivate:
	deactivate_locked_super(sb);
out:
	kfree(fsb);
	return NULL;
}
static void mbt_ext4_free_super_block(struct super_block *sb)
{
	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	kfree(sbi->s_blockgroup_lock);
	deactivate_super(sb);
	kfree(fsb);
}
struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;
	unsigned int cluster_bits;
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;
};
static void mbt_init_sb_layout(struct super_block *sb,
			       struct mbt_ext4_block_layout *layout)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sb->s_blocksize = 1UL << layout->blocksize_bits;
	sb->s_blocksize_bits = layout->blocksize_bits;

	sbi->s_groups_count = layout->group_count;
	sbi->s_blocks_per_group = layout->blocks_per_group;
	sbi->s_cluster_bits = layout->cluster_bits;
	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
	sbi->s_clusters_per_group = layout->blocks_per_group >>
				    layout->cluster_bits;
	sbi->s_desc_size = layout->desc_size;
	sbi->s_desc_per_block_bits =
		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

	es->s_first_data_block = cpu_to_le32(0);
	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
					    layout->group_count);
}
static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}
static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
{
	kfree(grp_ctx->bitmap_bh.b_data);
	grp_ctx->bitmap_bh.b_data = NULL;
}
static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}

static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}
/* called after mbt_init_sb_layout */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * The first data block (first cluster in the first group) is used by
	 * metadata; mark it used so we never hand out the first block, which
	 * would fail the ext4_sb_block_valid check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}
static void mbt_ctx_release(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
}
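
/*
 * The stubs below redirect ext4's block bitmap and group descriptor helpers
 * to the in-memory mbt_grp_ctx state, so the allocator paths under test never
 * touch a real block device.
 */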
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}
static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * real ext4_wait_block_bitmap will set these flags and
	 * functions like ext4_mb_init_cache will verify the flags.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}
static struct ext4_group_desc *
ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
			 struct buffer_head **bh)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	if (bh != NULL)
		*bh = &grp_ctx->gd_bh;

	return &grp_ctx->desc;
}
static int
ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
			  ext4_group_t group, ext4_grpblk_t blkoff,
			  ext4_grpblk_t len, int flags,
			  ext4_grpblk_t *ret_changed)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;

	if (state)
		mb_set_bits(bitmap_bh->b_data, blkoff, len);
	else
		mb_clear_bits(bitmap_bh->b_data, blkoff, len);

	return 0;
}
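
/*
 * Test fixture: build the fake super block from the parameterized layout,
 * install the static stubs, then run ext4_mb_init() against it. Tests find
 * the super block in test->priv.
 */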
#define TEST_GOAL_GROUP 1
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub function will be called in mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}
static void mbt_kunit_exit(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;

	mbt_mb_release(sb);
	mbt_ctx_release(sb);
	mbt_ext4_free_super_block(sb);
}
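
/*
 * ext4_mb_new_blocks_simple() should return the goal block first, then the
 * next free cluster in the goal group, then fall through to later groups,
 * wrap around to earlier groups, and finally fail once every cluster is used.
 */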
static void test_new_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_request ar;
	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
	int err = 0;
	ext4_fsblk_t found;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;

	inode->i_sb = sb;
	ar.inode = inode;

	/* get block at goal */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
		"failed to alloc block at goal, expected %llu found %llu",
		ar.goal, found);

	/* get block after goal in goal group */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
		"failed to alloc block after goal in goal group, expected %llu found %llu",
		ar.goal + EXT4_C2B(sbi, 1), found);

	/* get block after goal group */
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, goal_group + 1), found,
		"failed to alloc block after goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, goal_group + 1), found);

	/* get block before goal group */
	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
		"failed to alloc block before goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);

	/* no block available, fail to allocate block */
	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_NE_MSG(test, err, 0,
		"unexpectedly got a block when none is available");
}
#define TEST_RANGE_COUNT 8

struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};
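
/*
 * Split the group into TEST_RANGE_COUNT equally sized slots and pick one
 * random (possibly zero-length) extent inside each slot, so generated ranges
 * never overlap.
 */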
static void
mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
			 int count)
{
	ext4_grpblk_t start, len, max;
	int i;

	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
	for (i = 0; i < count; i++) {
		start = get_random_u32() % max;
		len = get_random_u32() % max;
		len = min(len, max - start);

		ranges[i].start = start + i * max;
		ranges[i].len = len;
	}
}
static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}
static void
test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
			      ext4_grpblk_t start, ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode;
	ext4_fsblk_t block;

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	if (len == 0)
		return;

	block = ext4_group_first_block_no(sb, goal_group) +
		EXT4_C2B(sbi, start);
	ext4_free_blocks_simple(inode, block, len);
	validate_free_blocks_simple(test, sb, goal_group, start, len);
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
}
static void test_free_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;
	struct test_range ranges[TEST_RANGE_COUNT];

	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, max);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
					      ranges[i].start, ranges[i].len);
}
static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}
static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}
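
/*
 * Reference buddy generator: derive the buddy bitmap and group counters
 * straight from the block bitmap, independently of ext4_mb_generate_buddy(),
 * so both results can be compared bit for bit.
 */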
static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]));

	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}
static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] diffs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}
static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
		       void *mbt_buddy, struct ext4_group_info *mbt_grp,
		       void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
	int i;

	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		ext4_grp->bb_counters[i] = 0;
	/* needed by validation in ext4_mb_generate_buddy */
	ext4_grp->bb_free = mbt_grp->bb_free;
	memset(ext4_buddy, 0xff, sb->s_blocksize);
	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
			       ext4_grp);

	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, mbt_grp, ext4_grp);
}
static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				     bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}
static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}
static void test_mb_mark_used(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
					   bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_mark_used_range(test, &e4b, ranges[i].start,
					ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}
static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	ext4_lock_group(sb, e4b->bd_group);
	mb_free_blocks(NULL, e4b, start, len);
	ext4_unlock_group(sb, e4b->bd_group);

	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}
static void test_mb_free_blocks(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	struct ext4_free_extent ex;
	int ret;
	int i;
	struct test_range ranges[TEST_RANGE_COUNT];

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
					   bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_start = 0;
	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(&e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	grp->bb_free = 0;
	memset(bitmap, 0xff, sb->s_blocksize);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
					  ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}
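
/*
 * Rough cost estimate: repeatedly mark and free random ranges and report the
 * jiffies accumulated around the mb_mark_used() loop; the case is registered
 * below with KUNIT_SPEED_SLOW.
 */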
#define COUNT_FOR_ESTIMATE 100000
static void test_mb_mark_used_cost(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i, j;
	unsigned long start, end, all = 0;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_group = TEST_GOAL_GROUP;
	for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
		mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
		start = jiffies;
		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ex.fe_start = ranges[i].start;
			ex.fe_len = ranges[i].len;
			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_mark_used(&e4b, &ex);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
		end = jiffies;
		all += (end - start);

		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_free_blocks(NULL, &e4b, ranges[i].start,
				       ranges[i].len);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
	}

	kunit_info(test, "cost %lu jiffies\n", all);
	ext4_mb_unload_buddy(&e4b);
}
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
	{
		.blocksize_bits = 10,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 12,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 16,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
};
static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);
static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
			      { .speed = KUNIT_SPEED_SLOW }),
	{}
};

static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};

kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");