// SPDX-License-Identifier: GPL-2.0+
/*
 * test_maple_tree.c: Test the maple tree API
 * Copyright (c) 2018-2022 Oracle Corporation
 * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
 *
 * Any tests that only require the interface of the tree.
 */
10 #include <linux/maple_tree.h>
11 #include <linux/module.h>
12 #include <linux/rwsem.h>
14 #define MTREE_ALLOC_MAX 0x2000000000000Ul
15 #define CONFIG_MAPLE_SEARCH
16 #define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31)
18 #ifndef CONFIG_DEBUG_MAPLE_TREE
19 #define mt_dump(mt, fmt) do {} while (0)
20 #define mt_validate(mt) do {} while (0)
21 #define mt_cache_shrink() do {} while (0)
22 #define mas_dump(mas) do {} while (0)
23 #define mas_wr_dump(mas) do {} while (0)
24 atomic_t maple_tree_tests_run
;
25 atomic_t maple_tree_tests_passed
;
28 #define MT_BUG_ON(__tree, __x) do { \
29 atomic_inc(&maple_tree_tests_run); \
31 pr_info("BUG at %s:%d (%u)\n", \
32 __func__, __LINE__, __x); \
33 pr_info("Pass: %u Run:%u\n", \
34 atomic_read(&maple_tree_tests_passed), \
35 atomic_read(&maple_tree_tests_run)); \
37 atomic_inc(&maple_tree_tests_passed); \
42 /* #define BENCH_SLOT_STORE */
43 /* #define BENCH_NODE_STORE */
44 /* #define BENCH_AWALK */
45 /* #define BENCH_WALK */
46 /* #define BENCH_LOAD */
47 /* #define BENCH_MT_FOR_EACH */
48 /* #define BENCH_FORK */
49 /* #define BENCH_MAS_FOR_EACH */
50 /* #define BENCH_MAS_PREV */
53 #define mt_set_non_kernel(x) do {} while (0)
54 #define mt_zero_nr_tallocated(x) do {} while (0)
56 #define cond_resched() do {} while (0)
59 #define mas_is_none(x) ((x)->status == ma_none)
60 #define mas_is_overflow(x) ((x)->status == ma_overflow)
61 #define mas_is_underflow(x) ((x)->status == ma_underflow)
63 static int __init
mtree_insert_index(struct maple_tree
*mt
,
64 unsigned long index
, gfp_t gfp
)
66 return mtree_insert(mt
, index
, xa_mk_value(index
& LONG_MAX
), gfp
);
69 static void __init
mtree_erase_index(struct maple_tree
*mt
, unsigned long index
)
71 MT_BUG_ON(mt
, mtree_erase(mt
, index
) != xa_mk_value(index
& LONG_MAX
));
72 MT_BUG_ON(mt
, mtree_load(mt
, index
) != NULL
);
75 static int __init
mtree_test_insert(struct maple_tree
*mt
, unsigned long index
,
78 return mtree_insert(mt
, index
, ptr
, GFP_KERNEL
);
81 static int __init
mtree_test_store_range(struct maple_tree
*mt
,
82 unsigned long start
, unsigned long end
, void *ptr
)
84 return mtree_store_range(mt
, start
, end
, ptr
, GFP_KERNEL
);
87 static int __init
mtree_test_store(struct maple_tree
*mt
, unsigned long start
,
90 return mtree_test_store_range(mt
, start
, start
, ptr
);
93 static int __init
mtree_test_insert_range(struct maple_tree
*mt
,
94 unsigned long start
, unsigned long end
, void *ptr
)
96 return mtree_insert_range(mt
, start
, end
, ptr
, GFP_KERNEL
);
99 static void __init
*mtree_test_load(struct maple_tree
*mt
, unsigned long index
)
101 return mtree_load(mt
, index
);
104 static void __init
*mtree_test_erase(struct maple_tree
*mt
, unsigned long index
)
106 return mtree_erase(mt
, index
);
109 #if defined(CONFIG_64BIT)
110 static noinline
void __init
check_mtree_alloc_range(struct maple_tree
*mt
,
111 unsigned long start
, unsigned long end
, unsigned long size
,
112 unsigned long expected
, int eret
, void *ptr
)
115 unsigned long result
= expected
+ 1;
118 ret
= mtree_alloc_range(mt
, &result
, ptr
, size
, start
, end
,
120 MT_BUG_ON(mt
, ret
!= eret
);
124 MT_BUG_ON(mt
, result
!= expected
);
127 static noinline
void __init
check_mtree_alloc_rrange(struct maple_tree
*mt
,
128 unsigned long start
, unsigned long end
, unsigned long size
,
129 unsigned long expected
, int eret
, void *ptr
)
132 unsigned long result
= expected
+ 1;
135 ret
= mtree_alloc_rrange(mt
, &result
, ptr
, size
, start
, end
,
137 MT_BUG_ON(mt
, ret
!= eret
);
141 MT_BUG_ON(mt
, result
!= expected
);
145 static noinline
void __init
check_load(struct maple_tree
*mt
,
146 unsigned long index
, void *ptr
)
148 void *ret
= mtree_test_load(mt
, index
);
151 pr_err("Load %lu returned %p expect %p\n", index
, ret
, ptr
);
152 MT_BUG_ON(mt
, ret
!= ptr
);
155 static noinline
void __init
check_store_range(struct maple_tree
*mt
,
156 unsigned long start
, unsigned long end
, void *ptr
, int expected
)
161 ret
= mtree_test_store_range(mt
, start
, end
, ptr
);
162 MT_BUG_ON(mt
, ret
!= expected
);
167 for (i
= start
; i
<= end
; i
++)
168 check_load(mt
, i
, ptr
);
171 static noinline
void __init
check_insert_range(struct maple_tree
*mt
,
172 unsigned long start
, unsigned long end
, void *ptr
, int expected
)
177 ret
= mtree_test_insert_range(mt
, start
, end
, ptr
);
178 MT_BUG_ON(mt
, ret
!= expected
);
183 for (i
= start
; i
<= end
; i
++)
184 check_load(mt
, i
, ptr
);
187 static noinline
void __init
check_insert(struct maple_tree
*mt
,
188 unsigned long index
, void *ptr
)
192 ret
= mtree_test_insert(mt
, index
, ptr
);
193 MT_BUG_ON(mt
, ret
!= 0);
196 static noinline
void __init
check_dup_insert(struct maple_tree
*mt
,
197 unsigned long index
, void *ptr
)
201 ret
= mtree_test_insert(mt
, index
, ptr
);
202 MT_BUG_ON(mt
, ret
!= -EEXIST
);
206 static noinline
void __init
check_index_load(struct maple_tree
*mt
,
209 return check_load(mt
, index
, xa_mk_value(index
& LONG_MAX
));
212 static inline __init
int not_empty(struct maple_node
*node
)
219 for (i
= 0; i
< ARRAY_SIZE(node
->slot
); i
++)
227 static noinline
void __init
check_rev_seq(struct maple_tree
*mt
,
228 unsigned long max
, bool verbose
)
230 unsigned long i
= max
, j
;
232 MT_BUG_ON(mt
, !mtree_empty(mt
));
234 mt_zero_nr_tallocated();
236 MT_BUG_ON(mt
, mtree_insert_index(mt
, i
, GFP_KERNEL
));
237 for (j
= i
; j
<= max
; j
++)
238 check_index_load(mt
, j
);
240 check_load(mt
, i
- 1, NULL
);
242 MT_BUG_ON(mt
, !mt_height(mt
));
244 MT_BUG_ON(mt
, !mt_height(mt
));
247 check_load(mt
, max
+ 1, NULL
);
252 mt_dump(mt
, mt_dump_dec
);
253 pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
254 __func__
, max
, mt_get_alloc_size()/1024, mt_nr_allocated(),
260 static noinline
void __init
check_seq(struct maple_tree
*mt
, unsigned long max
,
265 MT_BUG_ON(mt
, !mtree_empty(mt
));
267 mt_zero_nr_tallocated();
268 for (i
= 0; i
<= max
; i
++) {
269 MT_BUG_ON(mt
, mtree_insert_index(mt
, i
, GFP_KERNEL
));
270 for (j
= 0; j
<= i
; j
++)
271 check_index_load(mt
, j
);
274 MT_BUG_ON(mt
, !mt_height(mt
));
275 check_load(mt
, i
+ 1, NULL
);
281 mt_dump(mt
, mt_dump_dec
);
282 pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
283 max
, mt_get_alloc_size()/1024, mt_nr_allocated(),
289 static noinline
void __init
check_lb_not_empty(struct maple_tree
*mt
)
292 unsigned long huge
= 4000UL * 1000 * 1000;
297 check_insert(mt
, i
, (void *) i
);
298 for (j
= huge
; j
>= i
; j
/= 2) {
299 check_load(mt
, j
-1, NULL
);
300 check_load(mt
, j
, (void *) j
);
301 check_load(mt
, j
+1, NULL
);
308 static noinline
void __init
check_lower_bound_split(struct maple_tree
*mt
)
310 MT_BUG_ON(mt
, !mtree_empty(mt
));
311 check_lb_not_empty(mt
);
314 static noinline
void __init
check_upper_bound_split(struct maple_tree
*mt
)
319 MT_BUG_ON(mt
, !mtree_empty(mt
));
324 huge
= 4000UL * 1000 * 1000;
328 check_insert(mt
, i
, (void *) i
);
329 for (j
= i
; j
>= huge
; j
*= 2) {
330 check_load(mt
, j
-1, NULL
);
331 check_load(mt
, j
, (void *) j
);
332 check_load(mt
, j
+1, NULL
);
339 static noinline
void __init
check_mid_split(struct maple_tree
*mt
)
341 unsigned long huge
= 8000UL * 1000 * 1000;
343 check_insert(mt
, huge
, (void *) huge
);
344 check_insert(mt
, 0, xa_mk_value(0));
345 check_lb_not_empty(mt
);
348 static noinline
void __init
check_rev_find(struct maple_tree
*mt
)
350 int i
, nr_entries
= 200;
352 MA_STATE(mas
, mt
, 0, 0);
354 for (i
= 0; i
<= nr_entries
; i
++)
355 mtree_store_range(mt
, i
*10, i
*10 + 5,
356 xa_mk_value(i
), GFP_KERNEL
);
360 val
= mas_find_rev(&mas
, 1000);
361 MT_BUG_ON(mt
, val
!= xa_mk_value(100));
362 val
= mas_find_rev(&mas
, 1000);
363 MT_BUG_ON(mt
, val
!= NULL
);
366 val
= mas_find_rev(&mas
, 997);
367 MT_BUG_ON(mt
, val
!= NULL
);
370 val
= mas_find_rev(&mas
, 900);
371 MT_BUG_ON(mt
, val
!= xa_mk_value(100));
372 val
= mas_find_rev(&mas
, 900);
373 MT_BUG_ON(mt
, val
!= xa_mk_value(99));
376 val
= mas_find_rev(&mas
, 0);
377 MT_BUG_ON(mt
, val
!= xa_mk_value(2));
378 val
= mas_find_rev(&mas
, 0);
379 MT_BUG_ON(mt
, val
!= xa_mk_value(1));
380 val
= mas_find_rev(&mas
, 0);
381 MT_BUG_ON(mt
, val
!= xa_mk_value(0));
382 val
= mas_find_rev(&mas
, 0);
383 MT_BUG_ON(mt
, val
!= NULL
);
387 static noinline
void __init
check_find(struct maple_tree
*mt
)
389 unsigned long val
= 0;
393 unsigned long last
= 0, index
= 0;
394 void *entry
, *entry2
;
396 MA_STATE(mas
, mt
, 0, 0);
399 MT_BUG_ON(mt
, mtree_insert_index(mt
, val
++, GFP_KERNEL
));
401 #if defined(CONFIG_64BIT)
402 top
= 4398046511104UL;
413 for (int i
= 0; i
<= count
; i
++) {
415 MT_BUG_ON(mt
, mtree_insert_index(mt
, val
, GFP_KERNEL
));
417 MT_BUG_ON(mt
, mtree_insert(mt
, val
,
418 XA_ZERO_ENTRY
, GFP_KERNEL
));
426 while ((entry
= mas_find(&mas
, 268435456)) != NULL
) {
428 MT_BUG_ON(mt
, xa_mk_value(val
) != entry
);
430 MT_BUG_ON(mt
, entry
!= XA_ZERO_ENTRY
);
433 /* For zero check. */
442 mas_for_each(&mas
, entry
, ULONG_MAX
) {
444 MT_BUG_ON(mt
, xa_mk_value(val
) != entry
);
446 MT_BUG_ON(mt
, entry
!= XA_ZERO_ENTRY
);
448 /* For zero check. */
458 mas_for_each(&mas
, entry
, ULONG_MAX
) {
460 MT_BUG_ON(mt
, xa_mk_value(val
) != entry
);
462 MT_BUG_ON(mt
, entry
!= XA_ZERO_ENTRY
);
464 /* For zero check. */
475 max
= 300; /* A value big enough to include XA_ZERO_ENTRY at 64. */
476 mt_for_each(mt
, entry
, index
, max
) {
477 MT_BUG_ON(mt
, xa_mk_value(val
) != entry
);
479 if (val
== 64) /* Skip zero entry. */
481 /* For zero check. */
489 MT_BUG_ON(mt
, mtree_insert_index(mt
, ULONG_MAX
, GFP_KERNEL
));
490 mt_for_each(mt
, entry
, index
, ULONG_MAX
) {
492 MT_BUG_ON(mt
, entry
!= xa_mk_value(LONG_MAX
));
494 MT_BUG_ON(mt
, xa_mk_value(val
) != entry
);
496 /* Workaround for 32bit */
497 if ((val
<< 2) < val
)
502 if (val
== 64) /* Skip zero entry. */
504 /* For zero check. */
508 MT_BUG_ON(mt
, max
> 25);
510 mtree_erase_index(mt
, ULONG_MAX
);
514 entry
= mt_find(mt
, &index
, 512);
515 MT_BUG_ON(mt
, xa_mk_value(256) != entry
);
519 entry
= mt_find(mt
, &index
, 20);
520 MT_BUG_ON(mt
, entry
!= NULL
);
524 /* Insert ULONG_MAX */
525 MT_BUG_ON(mt
, mtree_insert_index(mt
, ULONG_MAX
, GFP_KERNEL
));
530 mas_for_each(&mas
, entry
, ULONG_MAX
) {
532 MT_BUG_ON(mt
, entry
!= XA_ZERO_ENTRY
);
534 MT_BUG_ON(mt
, entry
!= xa_mk_value(LONG_MAX
));
536 MT_BUG_ON(mt
, xa_mk_value(val
) != entry
);
538 /* Workaround for 32bit */
539 if ((val
<< 2) < val
)
544 /* For zero check. */
553 mas_set(&mas
, 1048576);
555 entry
= mas_find(&mas
, 1048576);
557 MT_BUG_ON(mas
.tree
, entry
== NULL
);
561 * 1. get the expected value, leveraging the existence of an end entry
562 * 2. delete end entry
563 * 3. find the last value but searching for ULONG_MAX and then using
566 /* First, get the expected result. */
569 mas
.index
= ULONG_MAX
; /* start at max.. */
570 entry
= mas_find(&mas
, ULONG_MAX
);
571 entry
= mas_prev(&mas
, 0);
575 /* Erase the last entry. */
577 mas
.index
= ULONG_MAX
;
578 mas
.last
= ULONG_MAX
;
581 /* Get the previous value from MAS_START */
583 entry2
= mas_prev(&mas
, 0);
586 MT_BUG_ON(mt
, entry
!= entry2
);
587 MT_BUG_ON(mt
, index
!= mas
.index
);
588 MT_BUG_ON(mt
, last
!= mas
.last
);
591 mas
.status
= ma_none
;
592 mas
.index
= ULONG_MAX
;
593 mas
.last
= ULONG_MAX
;
594 entry2
= mas_prev(&mas
, 0);
595 MT_BUG_ON(mt
, entry
!= entry2
);
598 MT_BUG_ON(mt
, mas_prev(&mas
, 0) != NULL
);
604 static noinline
void __init
check_find_2(struct maple_tree
*mt
)
609 MA_STATE(mas
, mt
, 0, 0);
611 mas_for_each(&mas
, entry
, ULONG_MAX
)
615 for (i
= 0; i
< 256; i
++) {
616 mtree_insert_index(mt
, i
, GFP_KERNEL
);
620 mas_for_each(&mas
, entry
, ULONG_MAX
) {
621 MT_BUG_ON(mt
, entry
!= xa_mk_value(j
));
625 MT_BUG_ON(mt
, j
!= i
+ 1);
628 for (i
= 0; i
< 256; i
++) {
629 mtree_erase_index(mt
, i
);
633 mas_for_each(&mas
, entry
, ULONG_MAX
) {
634 if (xa_is_zero(entry
))
637 MT_BUG_ON(mt
, entry
!= xa_mk_value(j
));
641 MT_BUG_ON(mt
, j
!= 256);
644 /*MT_BUG_ON(mt, !mtree_empty(mt)); */
648 #if defined(CONFIG_64BIT)
649 static noinline
void __init
check_alloc_rev_range(struct maple_tree
*mt
)
653 * cat /proc/self/maps | awk '{print $1}'|
654 * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
657 static const unsigned long range
[] = {
658 /* Inclusive , Exclusive. */
659 0x565234af2000, 0x565234af4000,
660 0x565234af4000, 0x565234af9000,
661 0x565234af9000, 0x565234afb000,
662 0x565234afc000, 0x565234afd000,
663 0x565234afd000, 0x565234afe000,
664 0x565235def000, 0x565235e10000,
665 0x7f36d4bfd000, 0x7f36d4ee2000,
666 0x7f36d4ee2000, 0x7f36d4f04000,
667 0x7f36d4f04000, 0x7f36d504c000,
668 0x7f36d504c000, 0x7f36d5098000,
669 0x7f36d5098000, 0x7f36d5099000,
670 0x7f36d5099000, 0x7f36d509d000,
671 0x7f36d509d000, 0x7f36d509f000,
672 0x7f36d509f000, 0x7f36d50a5000,
673 0x7f36d50b9000, 0x7f36d50db000,
674 0x7f36d50db000, 0x7f36d50dc000,
675 0x7f36d50dc000, 0x7f36d50fa000,
676 0x7f36d50fa000, 0x7f36d5102000,
677 0x7f36d5102000, 0x7f36d5103000,
678 0x7f36d5103000, 0x7f36d5104000,
679 0x7f36d5104000, 0x7f36d5105000,
680 0x7fff5876b000, 0x7fff5878d000,
681 0x7fff5878e000, 0x7fff58791000,
682 0x7fff58791000, 0x7fff58793000,
685 static const unsigned long holes
[] = {
687 * Note: start of hole is INCLUSIVE
688 * end of hole is EXCLUSIVE
689 * (opposite of the above table.)
690 * Start of hole, end of hole, size of hole (+1)
692 0x565234afb000, 0x565234afc000, 0x1000,
693 0x565234afe000, 0x565235def000, 0x12F1000,
694 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
698 * req_range consists of 4 values.
702 * 4. number that should be returned.
705 static const unsigned long req_range
[] = {
706 0x565234af9000, /* Min */
707 0x7fff58791000, /* Max */
709 0x7fff5878d << 12, /* First rev hole of size 0x1000 */
710 0, /* Return value success. */
713 0x565234AF0 << 12, /* Max */
715 0x565234AEE << 12, /* max - 3. */
716 0, /* Return value success. */
721 562949953421311 << 12,/* First rev hole of size 0x1000 */
722 0, /* Return value success. */
725 0x7F36D5109 << 12, /* Max */
727 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */
728 0, /* Return value success. */
739 18446744073709551615UL,
740 562915594369134UL << 12,
744 /* Single space test. */
752 int i
, range_count
= ARRAY_SIZE(range
);
753 int req_range_count
= ARRAY_SIZE(req_range
);
754 unsigned long min
= 0;
756 MA_STATE(mas
, mt
, 0, 0);
758 mtree_store_range(mt
, MTREE_ALLOC_MAX
, ULONG_MAX
, XA_ZERO_ENTRY
,
760 #define DEBUG_REV_RANGE 0
761 for (i
= 0; i
< range_count
; i
+= 2) {
762 /* Inclusive, Inclusive (with the -1) */
765 pr_debug("\t%s: Insert %lu-%lu\n", __func__
, range
[i
] >> 12,
766 (range
[i
+ 1] >> 12) - 1);
768 check_insert_range(mt
, range
[i
] >> 12, (range
[i
+ 1] >> 12) - 1,
769 xa_mk_value(range
[i
] >> 12), 0);
775 for (i
= 0; i
< ARRAY_SIZE(holes
); i
+= 3) {
777 pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n",
778 min
, holes
[i
+1]>>12, holes
[i
+2]>>12,
781 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, min
,
785 pr_debug("Found %lu %lu\n", mas
.index
, mas
.last
);
786 pr_debug("gap %lu %lu\n", (holes
[i
] >> 12),
789 MT_BUG_ON(mt
, mas
.last
+ 1 != (holes
[i
+1] >> 12));
790 MT_BUG_ON(mt
, mas
.index
!= (holes
[i
+1] >> 12) - (holes
[i
+2] >> 12));
791 min
= holes
[i
+1] >> 12;
796 for (i
= 0; i
< req_range_count
; i
+= 5) {
798 pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
799 i
, req_range
[i
] >> 12,
800 (req_range
[i
+ 1] >> 12),
801 req_range
[i
+2] >> 12,
802 req_range
[i
+3] >> 12);
804 check_mtree_alloc_rrange(mt
,
805 req_range
[i
] >> 12, /* start */
806 req_range
[i
+1] >> 12, /* end */
807 req_range
[i
+2] >> 12, /* size */
808 req_range
[i
+3] >> 12, /* expected address */
809 req_range
[i
+4], /* expected return */
810 xa_mk_value(req_range
[i
] >> 12)); /* pointer */
814 mt_set_non_kernel(1);
815 mtree_erase(mt
, 34148798727); /* create a deleted range. */
816 mtree_erase(mt
, 34148798725);
817 check_mtree_alloc_rrange(mt
, 0, 34359052173, 210253414,
823 static noinline
void __init
check_alloc_range(struct maple_tree
*mt
)
827 * cat /proc/self/maps|awk '{print $1}'|
828 * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
831 static const unsigned long range
[] = {
832 /* Inclusive , Exclusive. */
833 0x565234af2000, 0x565234af4000,
834 0x565234af4000, 0x565234af9000,
835 0x565234af9000, 0x565234afb000,
836 0x565234afc000, 0x565234afd000,
837 0x565234afd000, 0x565234afe000,
838 0x565235def000, 0x565235e10000,
839 0x7f36d4bfd000, 0x7f36d4ee2000,
840 0x7f36d4ee2000, 0x7f36d4f04000,
841 0x7f36d4f04000, 0x7f36d504c000,
842 0x7f36d504c000, 0x7f36d5098000,
843 0x7f36d5098000, 0x7f36d5099000,
844 0x7f36d5099000, 0x7f36d509d000,
845 0x7f36d509d000, 0x7f36d509f000,
846 0x7f36d509f000, 0x7f36d50a5000,
847 0x7f36d50b9000, 0x7f36d50db000,
848 0x7f36d50db000, 0x7f36d50dc000,
849 0x7f36d50dc000, 0x7f36d50fa000,
850 0x7f36d50fa000, 0x7f36d5102000,
851 0x7f36d5102000, 0x7f36d5103000,
852 0x7f36d5103000, 0x7f36d5104000,
853 0x7f36d5104000, 0x7f36d5105000,
854 0x7fff5876b000, 0x7fff5878d000,
855 0x7fff5878e000, 0x7fff58791000,
856 0x7fff58791000, 0x7fff58793000,
858 static const unsigned long holes
[] = {
859 /* Start of hole, end of hole, size of hole (+1) */
860 0x565234afb000, 0x565234afc000, 0x1000,
861 0x565234afe000, 0x565235def000, 0x12F1000,
862 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
866 * req_range consists of 4 values.
870 * 4. number that should be returned.
873 static const unsigned long req_range
[] = {
874 0x565234af9000, /* Min */
875 0x7fff58791000, /* Max */
877 0x565234afb000, /* First hole in our data of size 1000. */
878 0, /* Return value success. */
881 0x7fff58791000, /* Max */
883 0x0, /* First hole in our data of size 2000. */
884 0, /* Return value success. */
887 34148797436 << 12, /* Min */
888 0x7fff587AF000, /* Max */
890 34148798629 << 12, /* Expected location */
891 0, /* Return value success. */
894 34148798623 << 12, /* Min */
895 34148798683 << 12, /* Max */
897 0, /* Expected location */
898 -EBUSY
, /* Return value failed. */
900 /* Test filling entire gap. */
901 34148798623 << 12, /* Min */
902 0x7fff587AF000, /* Max */
904 34148798632 << 12, /* Expected location */
905 0, /* Return value success. */
907 /* Test walking off the end of root. */
911 0, /* Expected location */
912 -EBUSY
, /* Return value failure. */
914 /* Test looking for too large a hole across entire range. */
917 4503599618982063UL << 12, /* Size */
918 34359052178 << 12, /* Expected location */
919 -EBUSY
, /* Return failure. */
921 /* Test a single entry */
922 34148798648 << 12, /* Min */
923 34148798648 << 12, /* Max */
924 4096, /* Size of 1 */
925 34148798648 << 12, /* Location is the same as min/max */
928 int i
, range_count
= ARRAY_SIZE(range
);
929 int req_range_count
= ARRAY_SIZE(req_range
);
930 unsigned long min
= 0x565234af2000;
931 MA_STATE(mas
, mt
, 0, 0);
933 mtree_store_range(mt
, MTREE_ALLOC_MAX
, ULONG_MAX
, XA_ZERO_ENTRY
,
935 for (i
= 0; i
< range_count
; i
+= 2) {
936 #define DEBUG_ALLOC_RANGE 0
937 #if DEBUG_ALLOC_RANGE
938 pr_debug("\tInsert %lu-%lu\n", range
[i
] >> 12,
939 (range
[i
+ 1] >> 12) - 1);
940 mt_dump(mt
, mt_dump_hex
);
942 check_insert_range(mt
, range
[i
] >> 12, (range
[i
+ 1] >> 12) - 1,
943 xa_mk_value(range
[i
] >> 12), 0);
950 for (i
= 0; i
< ARRAY_SIZE(holes
); i
+= 3) {
952 #if DEBUG_ALLOC_RANGE
953 pr_debug("\tGet empty %lu-%lu size %lu (%lx-%lx)\n", min
>> 12,
954 holes
[i
+1] >> 12, holes
[i
+2] >> 12,
957 MT_BUG_ON(mt
, mas_empty_area(&mas
, min
>> 12,
960 MT_BUG_ON(mt
, mas
.index
!= holes
[i
] >> 12);
965 for (i
= 0; i
< req_range_count
; i
+= 5) {
966 #if DEBUG_ALLOC_RANGE
967 pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n",
968 i
/5, req_range
[i
] >> 12, req_range
[i
+ 1] >> 12,
969 req_range
[i
+ 2] >> 12, req_range
[i
+ 3] >> 12,
970 req_range
[i
], req_range
[i
+1]);
972 check_mtree_alloc_range(mt
,
973 req_range
[i
] >> 12, /* start */
974 req_range
[i
+1] >> 12, /* end */
975 req_range
[i
+2] >> 12, /* size */
976 req_range
[i
+3] >> 12, /* expected address */
977 req_range
[i
+4], /* expected return */
978 xa_mk_value(req_range
[i
] >> 12)); /* pointer */
980 #if DEBUG_ALLOC_RANGE
981 mt_dump(mt
, mt_dump_hex
);
989 static noinline
void __init
check_ranges(struct maple_tree
*mt
)
992 static const unsigned long r
[] = {
995 17, 22, /* Overlaps previous range. */
1002 MT_BUG_ON(mt
, !mtree_empty(mt
));
1003 check_insert_range(mt
, r
[0], r
[1], xa_mk_value(r
[0]), 0);
1004 check_insert_range(mt
, r
[2], r
[3], xa_mk_value(r
[2]), 0);
1005 check_insert_range(mt
, r
[4], r
[5], xa_mk_value(r
[4]), -EEXIST
);
1006 MT_BUG_ON(mt
, !mt_height(mt
));
1008 check_store_range(mt
, r
[4], r
[5], xa_mk_value(r
[4]), 0);
1009 check_store_range(mt
, r
[6], r
[7], xa_mk_value(r
[6]), 0);
1010 check_store_range(mt
, r
[8], r
[9], xa_mk_value(r
[8]), 0);
1011 MT_BUG_ON(mt
, !mt_height(mt
));
1013 MT_BUG_ON(mt
, mt_height(mt
));
1015 check_seq(mt
, 50, false);
1016 mt_set_non_kernel(4);
1017 check_store_range(mt
, 5, 47, xa_mk_value(47), 0);
1018 MT_BUG_ON(mt
, !mt_height(mt
));
1021 /* Create tree of 1-100 */
1022 check_seq(mt
, 100, false);
1024 mt_set_non_kernel(10);
1025 check_store_range(mt
, r
[10], r
[11], xa_mk_value(r
[10]), 0);
1026 MT_BUG_ON(mt
, !mt_height(mt
));
1029 /* Create tree of 1-200 */
1030 check_seq(mt
, 200, false);
1032 check_store_range(mt
, r
[10], r
[11], xa_mk_value(r
[10]), 0);
1033 MT_BUG_ON(mt
, !mt_height(mt
));
1036 check_seq(mt
, 30, false);
1037 check_store_range(mt
, 6, 18, xa_mk_value(6), 0);
1038 MT_BUG_ON(mt
, !mt_height(mt
));
1041 /* Overwrite across multiple levels. */
1042 /* Create tree of 1-400 */
1043 check_seq(mt
, 400, false);
1044 mt_set_non_kernel(50);
1046 check_store_range(mt
, r
[12], r
[13], xa_mk_value(r
[12]), 0);
1047 mt_set_non_kernel(50);
1048 mtree_test_erase(mt
, 140);
1049 mtree_test_erase(mt
, 141);
1050 mtree_test_erase(mt
, 142);
1051 mtree_test_erase(mt
, 143);
1052 mtree_test_erase(mt
, 130);
1053 mtree_test_erase(mt
, 131);
1054 mtree_test_erase(mt
, 132);
1055 mtree_test_erase(mt
, 133);
1056 mtree_test_erase(mt
, 134);
1057 mtree_test_erase(mt
, 135);
1058 check_load(mt
, r
[12], xa_mk_value(r
[12]));
1059 check_load(mt
, r
[13], xa_mk_value(r
[12]));
1060 check_load(mt
, r
[13] - 1, xa_mk_value(r
[12]));
1061 check_load(mt
, r
[13] + 1, xa_mk_value(r
[13] + 1));
1062 check_load(mt
, 135, NULL
);
1063 check_load(mt
, 140, NULL
);
1064 mt_set_non_kernel(0);
1065 MT_BUG_ON(mt
, !mt_height(mt
));
1070 /* Overwrite multiple levels at the end of the tree (slot 7) */
1071 mt_set_non_kernel(50);
1072 check_seq(mt
, 400, false);
1073 check_store_range(mt
, 353, 361, xa_mk_value(353), 0);
1074 check_store_range(mt
, 347, 352, xa_mk_value(347), 0);
1076 check_load(mt
, 346, xa_mk_value(346));
1077 for (i
= 347; i
<= 352; i
++)
1078 check_load(mt
, i
, xa_mk_value(347));
1079 for (i
= 353; i
<= 361; i
++)
1080 check_load(mt
, i
, xa_mk_value(353));
1081 check_load(mt
, 362, xa_mk_value(362));
1082 mt_set_non_kernel(0);
1083 MT_BUG_ON(mt
, !mt_height(mt
));
1086 mt_set_non_kernel(50);
1087 check_seq(mt
, 400, false);
1088 check_store_range(mt
, 352, 364, NULL
, 0);
1089 check_store_range(mt
, 351, 363, xa_mk_value(352), 0);
1090 check_load(mt
, 350, xa_mk_value(350));
1091 check_load(mt
, 351, xa_mk_value(352));
1092 for (i
= 352; i
<= 363; i
++)
1093 check_load(mt
, i
, xa_mk_value(352));
1094 check_load(mt
, 364, NULL
);
1095 check_load(mt
, 365, xa_mk_value(365));
1096 mt_set_non_kernel(0);
1097 MT_BUG_ON(mt
, !mt_height(mt
));
1100 mt_set_non_kernel(5);
1101 check_seq(mt
, 400, false);
1102 check_store_range(mt
, 352, 364, NULL
, 0);
1103 check_store_range(mt
, 351, 364, xa_mk_value(352), 0);
1104 check_load(mt
, 350, xa_mk_value(350));
1105 check_load(mt
, 351, xa_mk_value(352));
1106 for (i
= 352; i
<= 364; i
++)
1107 check_load(mt
, i
, xa_mk_value(352));
1108 check_load(mt
, 365, xa_mk_value(365));
1109 mt_set_non_kernel(0);
1110 MT_BUG_ON(mt
, !mt_height(mt
));
1114 mt_set_non_kernel(50);
1115 check_seq(mt
, 400, false);
1116 check_store_range(mt
, 362, 367, xa_mk_value(362), 0);
1117 check_store_range(mt
, 353, 361, xa_mk_value(353), 0);
1118 mt_set_non_kernel(0);
1120 MT_BUG_ON(mt
, !mt_height(mt
));
1123 * Interesting cases:
1124 * 1. Overwrite the end of a node and end in the first entry of the next
1126 * 2. Split a single range
1127 * 3. Overwrite the start of a range
1128 * 4. Overwrite the end of a range
1129 * 5. Overwrite the entire range
1130 * 6. Overwrite a range that causes multiple parent nodes to be
1132 * 7. Overwrite a range that causes multiple parent nodes and part of
1133 * root to be combined
1134 * 8. Overwrite the whole tree
1135 * 9. Try to overwrite the zero entry of an alloc tree.
1136 * 10. Write a range larger than a nodes current pivot
1139 mt_set_non_kernel(50);
1140 for (i
= 0; i
<= 500; i
++) {
1143 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1145 check_store_range(mt
, 2400, 2400, xa_mk_value(2400), 0);
1146 check_store_range(mt
, 2411, 2411, xa_mk_value(2411), 0);
1147 check_store_range(mt
, 2412, 2412, xa_mk_value(2412), 0);
1148 check_store_range(mt
, 2396, 2400, xa_mk_value(4052020), 0);
1149 check_store_range(mt
, 2402, 2402, xa_mk_value(2402), 0);
1151 mt_set_non_kernel(0);
1153 mt_set_non_kernel(50);
1154 for (i
= 0; i
<= 500; i
++) {
1157 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1159 check_store_range(mt
, 2422, 2422, xa_mk_value(2422), 0);
1160 check_store_range(mt
, 2424, 2424, xa_mk_value(2424), 0);
1161 check_store_range(mt
, 2425, 2425, xa_mk_value(2), 0);
1162 check_store_range(mt
, 2460, 2470, NULL
, 0);
1163 check_store_range(mt
, 2435, 2460, xa_mk_value(2435), 0);
1164 check_store_range(mt
, 2461, 2470, xa_mk_value(2461), 0);
1165 mt_set_non_kernel(0);
1166 MT_BUG_ON(mt
, !mt_height(mt
));
1169 /* Check in-place modifications */
1170 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1171 /* Append to the start of last range */
1172 mt_set_non_kernel(50);
1173 for (i
= 0; i
<= 500; i
++) {
1176 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1179 /* Append to the last range without touching any boundaries */
1180 for (i
= 0; i
< 10; i
++) {
1183 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1186 /* Append to the end of last range */
1188 for (i
= 0; i
< 10; i
++) {
1190 MT_BUG_ON(mt
, mtree_test_store_range(mt
, val
, ULONG_MAX
,
1191 xa_mk_value(val
)) != 0);
1194 /* Overwriting the range and over a part of the next range */
1195 for (i
= 10; i
< 30; i
+= 2) {
1198 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1201 /* Overwriting a part of the range and over the next range */
1202 for (i
= 50; i
< 70; i
+= 2) {
1205 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1209 * Expand the range, only partially overwriting the previous and
1212 for (i
= 100; i
< 130; i
+= 3) {
1215 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1219 * Expand the range, only partially overwriting the previous and
1220 * next ranges, in RCU mode
1223 for (i
= 150; i
< 180; i
+= 3) {
1226 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1229 MT_BUG_ON(mt
, !mt_height(mt
));
1231 mt_set_non_kernel(0);
1234 /* Test rebalance gaps */
1235 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1236 mt_set_non_kernel(50);
1237 for (i
= 0; i
<= 50; i
++) {
1240 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1242 check_store_range(mt
, 161, 161, xa_mk_value(161), 0);
1243 check_store_range(mt
, 162, 162, xa_mk_value(162), 0);
1244 check_store_range(mt
, 163, 163, xa_mk_value(163), 0);
1245 check_store_range(mt
, 240, 249, NULL
, 0);
1246 mtree_erase(mt
, 200);
1247 mtree_erase(mt
, 210);
1248 mtree_erase(mt
, 220);
1249 mtree_erase(mt
, 230);
1250 mt_set_non_kernel(0);
1251 MT_BUG_ON(mt
, !mt_height(mt
));
1254 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1255 for (i
= 0; i
<= 500; i
++) {
1258 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1260 check_store_range(mt
, 4600, 4959, xa_mk_value(1), 0);
1262 MT_BUG_ON(mt
, !mt_height(mt
));
1265 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1266 for (i
= 0; i
<= 500; i
++) {
1269 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1271 check_store_range(mt
, 4811, 4811, xa_mk_value(4811), 0);
1272 check_store_range(mt
, 4812, 4812, xa_mk_value(4812), 0);
1273 check_store_range(mt
, 4861, 4861, xa_mk_value(4861), 0);
1274 check_store_range(mt
, 4862, 4862, xa_mk_value(4862), 0);
1275 check_store_range(mt
, 4842, 4849, NULL
, 0);
1277 MT_BUG_ON(mt
, !mt_height(mt
));
1280 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1281 for (i
= 0; i
<= 1300; i
++) {
1284 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1285 MT_BUG_ON(mt
, mt_height(mt
) >= 4);
1287 /* Cause a 3 child split all the way up the tree. */
1288 for (i
= 5; i
< 215; i
+= 10)
1289 check_store_range(mt
, 11450 + i
, 11450 + i
+ 1, NULL
, 0);
1290 for (i
= 5; i
< 65; i
+= 10)
1291 check_store_range(mt
, 11770 + i
, 11770 + i
+ 1, NULL
, 0);
1293 MT_BUG_ON(mt
, mt_height(mt
) >= 4);
1294 for (i
= 5; i
< 45; i
+= 10)
1295 check_store_range(mt
, 11700 + i
, 11700 + i
+ 1, NULL
, 0);
1297 MT_BUG_ON(mt
, mt_height(mt
) < 4);
1301 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1302 for (i
= 0; i
<= 1200; i
++) {
1305 check_store_range(mt
, val
, val2
, xa_mk_value(val
), 0);
1306 MT_BUG_ON(mt
, mt_height(mt
) >= 4);
1308 /* Fill parents and leaves before split. */
1309 for (i
= 5; i
< 455; i
+= 10)
1310 check_store_range(mt
, 7800 + i
, 7800 + i
+ 1, NULL
, 0);
1312 for (i
= 1; i
< 16; i
++)
1313 check_store_range(mt
, 8185 + i
, 8185 + i
+ 1,
1314 xa_mk_value(8185+i
), 0);
1315 MT_BUG_ON(mt
, mt_height(mt
) >= 4);
1316 /* triple split across multiple levels. */
1317 check_store_range(mt
, 8184, 8184, xa_mk_value(8184), 0);
1319 MT_BUG_ON(mt
, mt_height(mt
) != 4);
1322 static noinline
void __init
check_next_entry(struct maple_tree
*mt
)
1325 unsigned long limit
= 30, i
= 0;
1326 MA_STATE(mas
, mt
, i
, i
);
1328 MT_BUG_ON(mt
, !mtree_empty(mt
));
1330 check_seq(mt
, limit
, false);
1333 /* Check the first one and get ma_state in the correct state. */
1334 MT_BUG_ON(mt
, mas_walk(&mas
) != xa_mk_value(i
++));
1335 for ( ; i
<= limit
+ 1; i
++) {
1336 entry
= mas_next(&mas
, limit
);
1338 MT_BUG_ON(mt
, entry
!= NULL
);
1340 MT_BUG_ON(mt
, xa_mk_value(i
) != entry
);
1346 static noinline
void __init
check_prev_entry(struct maple_tree
*mt
)
1348 unsigned long index
= 16;
1352 MA_STATE(mas
, mt
, index
, index
);
1354 MT_BUG_ON(mt
, !mtree_empty(mt
));
1355 check_seq(mt
, 30, false);
1358 value
= mas_find(&mas
, ULONG_MAX
);
1359 MT_BUG_ON(mt
, value
!= xa_mk_value(index
));
1360 value
= mas_prev(&mas
, 0);
1361 MT_BUG_ON(mt
, value
!= xa_mk_value(index
- 1));
1365 /* Check limits on prev */
1366 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1368 for (i
= 0; i
<= index
; i
++) {
1369 mas_set_range(&mas
, i
*10, i
*10+5);
1370 mas_store_gfp(&mas
, xa_mk_value(i
), GFP_KERNEL
);
1374 value
= mas_walk(&mas
);
1375 MT_BUG_ON(mt
, value
!= xa_mk_value(2));
1377 value
= mas_prev(&mas
, 19);
1378 MT_BUG_ON(mt
, value
!= NULL
);
1381 value
= mas_walk(&mas
);
1382 MT_BUG_ON(mt
, value
!= xa_mk_value(8));
1384 value
= mas_prev(&mas
, 76);
1385 MT_BUG_ON(mt
, value
!= NULL
);
/*
 * check_root_expand() - exercise stores/walks on a root-only (single entry)
 * tree, including entries that are not xarray values: a plain function
 * pointer and pointers with low bits 1 and 2 set.  Verifies mas.index /
 * mas.last bookkeeping around mas_walk(), mas_next() and mas_prev() at the
 * extremes (0 and ULONG_MAX).
 * NOTE(review): original line numbers jump (1392->1398, 1402->1404 etc.);
 * declarations ("void *ptr;"), locking and mas_set() repositioning lines
 * appear to have been dropped by extraction.  Code preserved verbatim.
 */
1390 static noinline
void __init
check_root_expand(struct maple_tree
*mt
)
1392 MA_STATE(mas
, mt
, 0, 0);
/* Empty tree: walk at 0 finds NULL covering the whole range. */
1398 ptr
= mas_walk(&mas
);
1399 MT_BUG_ON(mt
, mas
.index
!= 0);
1400 MT_BUG_ON(mt
, ptr
!= NULL
);
1401 MT_BUG_ON(mt
, mas
.index
!= 0);
1402 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
/* Store a non-xarray-value pointer (a function address) as the entry. */
1404 ptr
= &check_prev_entry
;
1406 mas_store_gfp(&mas
, ptr
, GFP_KERNEL
);
1409 ptr
= mas_walk(&mas
);
1410 MT_BUG_ON(mt
, ptr
!= NULL
);
1413 ptr
= mas_walk(&mas
);
1414 MT_BUG_ON(mt
, ptr
!= &check_prev_entry
);
1417 ptr
= mas_walk(&mas
);
1418 MT_BUG_ON(mt
, ptr
!= NULL
);
/* Re-init and repeat with the entry stored at index 0 only. */
1423 mt_init_flags(mt
, 0);
1427 ptr
= &check_prev_entry
;
1428 mas_store_gfp(&mas
, ptr
, GFP_KERNEL
);
1431 ptr
= mas_walk(&mas
);
1432 MT_BUG_ON(mt
, ptr
!= NULL
);
1433 MT_BUG_ON(mt
, mas
.index
!= 1);
1434 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
/* A ranged walk over 0-100 must still land on the single entry at 0. */
1436 mas_set_range(&mas
, 0, 100);
1437 ptr
= mas_walk(&mas
);
1438 MT_BUG_ON(mt
, ptr
!= &check_prev_entry
);
1439 MT_BUG_ON(mt
, mas
.last
!= 0);
/* Pointer with bit 0 set (looks like an xa value tag) must round-trip. */
1443 mt_init_flags(mt
, 0);
1447 ptr
= (void *)((unsigned long) check_prev_entry
| 1UL);
1448 mas_store_gfp(&mas
, ptr
, GFP_KERNEL
);
1449 ptr
= mas_next(&mas
, ULONG_MAX
);
1450 MT_BUG_ON(mt
, ptr
!= NULL
);
1451 MT_BUG_ON(mt
, (mas
.index
!= 1) && (mas
.last
!= ULONG_MAX
));
1454 ptr
= mas_prev(&mas
, 0);
1455 MT_BUG_ON(mt
, (mas
.index
!= 0) && (mas
.last
!= 0));
1456 MT_BUG_ON(mt
, ptr
!= (void *)((unsigned long) check_prev_entry
| 1UL));
/* Pointer with bit 1 set (looks like an internal node tag) round-trips. */
1462 mt_init_flags(mt
, 0);
1465 ptr
= (void *)((unsigned long) check_prev_entry
| 2UL);
1466 mas_store_gfp(&mas
, ptr
, GFP_KERNEL
);
1467 ptr
= mas_next(&mas
, ULONG_MAX
);
1468 MT_BUG_ON(mt
, ptr
!= NULL
);
1469 MT_BUG_ON(mt
, (mas
.index
!= ULONG_MAX
) && (mas
.last
!= ULONG_MAX
));
1472 ptr
= mas_prev(&mas
, 0);
1473 MT_BUG_ON(mt
, (mas
.index
!= 0) && (mas
.last
!= 0));
1474 MT_BUG_ON(mt
, ptr
!= (void *)((unsigned long) check_prev_entry
| 2UL));
/*
 * check_gap_combining() - verify that erases combine adjacent gaps and that
 * mas_empty_area_rev() then finds the combined gap at the expected index.
 * Uses index sequences selected per node size (seq100_64 vs seq100_32 —
 * presumably switched on MAPLE_32BIT; the selection line is not visible
 * here), then larger seq2000/seq400 sequences for multi-level gap and
 * two-level rebalance cases.
 * NOTE(review): original line numbers jump throughout (1486->1500,
 * 1526->1534, ...): the seq100_64/seq100_32/seq2000 initializer contents,
 * "void *entry;" declaration, locking, mas_get node snapshots into mn1/mn2,
 * and the size arguments of several mas_empty_area_rev() calls were dropped
 * by extraction.  Code preserved verbatim.
 */
1480 static noinline
void __init
check_gap_combining(struct maple_tree
*mt
)
1482 struct maple_enode
*mn1
, *mn2
;
1484 unsigned long singletons
= 100;
1485 static const unsigned long *seq100
;
1486 static const unsigned long seq100_64
[] = {
1500 static const unsigned long seq100_32
[] = {
1514 static const unsigned long seq2000
[] = {
1518 static const unsigned long seq400
[] = {
1520 256, 260, 266, 270, 275, 280, 290, 398,
1524 unsigned long index
;
1526 MA_STATE(mas
, mt
, 0, 0);
1534 mas_set(&mas
, index
);
1535 MT_BUG_ON(mt
, !mtree_empty(mt
));
1536 check_seq(mt
, singletons
, false); /* create 100 singletons. */
1538 mt_set_non_kernel(1);
1539 mtree_test_erase(mt
, seq100
[2]);
1540 check_load(mt
, seq100
[2], NULL
);
1541 mtree_test_erase(mt
, seq100
[1]);
1542 check_load(mt
, seq100
[1], NULL
);
1545 entry
= mas_find(&mas
, ULONG_MAX
);
1546 MT_BUG_ON(mt
, entry
!= xa_mk_value(index
));
1548 mas_next(&mas
, ULONG_MAX
);
1549 entry
= mas_next(&mas
, ULONG_MAX
);
1550 MT_BUG_ON(mt
, entry
!= xa_mk_value(index
+ 4));
1552 MT_BUG_ON(mt
, mn1
== mn2
); /* test the test. */
1555 * At this point, there is a gap of 2 at index + 1 between seq100[3] and
1556 * seq100[4]. Search for the gap.
1558 mt_set_non_kernel(1);
1560 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, seq100
[3], seq100
[4],
1562 MT_BUG_ON(mt
, mas
.index
!= index
+ 1);
1565 mtree_test_erase(mt
, seq100
[6]);
1566 check_load(mt
, seq100
[6], NULL
);
1567 mtree_test_erase(mt
, seq100
[7]);
1568 check_load(mt
, seq100
[7], NULL
);
1569 mtree_test_erase(mt
, seq100
[8]);
1576 entry
= mas_find(&mas
, ULONG_MAX
);
1577 MT_BUG_ON(mt
, entry
!= xa_mk_value(index
));
1579 entry
= mas_next(&mas
, ULONG_MAX
);
1580 MT_BUG_ON(mt
, entry
!= xa_mk_value(index
+ 4));
1581 mas_next(&mas
, ULONG_MAX
); /* go to the next entry. */
1583 MT_BUG_ON(mt
, mn1
== mn2
); /* test the next entry is in the next node. */
1586 * At this point, there is a gap of 3 at seq100[6]. Find it by
1587 * searching 20 - 50 for size 3.
1590 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, seq100
[10], seq100
[11],
1592 MT_BUG_ON(mt
, mas
.index
!= seq100
[6]);
/* Overwriting with NULL must also create/combine gaps. */
1595 mt_set_non_kernel(1);
1596 mtree_store(mt
, seq100
[13], NULL
, GFP_KERNEL
);
1597 check_load(mt
, seq100
[13], NULL
);
1598 check_load(mt
, seq100
[14], xa_mk_value(seq100
[14]));
1599 mtree_store(mt
, seq100
[14], NULL
, GFP_KERNEL
);
1600 check_load(mt
, seq100
[13], NULL
);
1601 check_load(mt
, seq100
[14], NULL
);
1605 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, seq100
[16], seq100
[15],
1607 MT_BUG_ON(mt
, mas
.index
!= seq100
[13]);
1612 * *DEPRECATED: no retries anymore* Test retry entry in the start of a
1615 mt_set_non_kernel(2);
1616 mtree_test_store_range(mt
, seq100
[18], seq100
[14], NULL
);
1617 mtree_test_erase(mt
, seq100
[15]);
1620 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, seq100
[16], seq100
[19],
1623 MT_BUG_ON(mt
, mas
.index
!= seq100
[18]);
1627 /* seq 2000 tests are for multi-level tree gaps */
1628 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1629 check_seq(mt
, 2000, false);
1630 mt_set_non_kernel(1);
1631 mtree_test_erase(mt
, seq2000
[0]);
1632 mtree_test_erase(mt
, seq2000
[1]);
1634 mt_set_non_kernel(2);
1637 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, seq2000
[2], seq2000
[3],
1639 MT_BUG_ON(mt
, mas
.index
!= seq2000
[1]);
1644 /* seq 400 tests rebalancing over two levels. */
1645 mt_set_non_kernel(99);
1646 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1647 check_seq(mt
, 400, false);
1648 mtree_test_store_range(mt
, seq400
[0], seq400
[1], NULL
);
1649 mt_set_non_kernel(0);
/* Successively widen overwrites ending at seq400[9] to force rebalances. */
1652 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
1653 check_seq(mt
, 400, false);
1654 mt_set_non_kernel(50);
1655 mtree_test_store_range(mt
, seq400
[2], seq400
[9],
1656 xa_mk_value(seq400
[2]));
1657 mtree_test_store_range(mt
, seq400
[3], seq400
[9],
1658 xa_mk_value(seq400
[3]));
1659 mtree_test_store_range(mt
, seq400
[4], seq400
[9],
1660 xa_mk_value(seq400
[4]));
1661 mtree_test_store_range(mt
, seq400
[5], seq400
[9],
1662 xa_mk_value(seq400
[5]));
1663 mtree_test_store_range(mt
, seq400
[0], seq400
[9],
1664 xa_mk_value(seq400
[0]));
1665 mtree_test_store_range(mt
, seq400
[6], seq400
[9],
1666 xa_mk_value(seq400
[6]));
1667 mtree_test_store_range(mt
, seq400
[7], seq400
[9],
1668 xa_mk_value(seq400
[7]));
1669 mtree_test_store_range(mt
, seq400
[8], seq400
[9],
1670 xa_mk_value(seq400
[8]));
1671 mtree_test_store_range(mt
, seq400
[10], seq400
[11],
1672 xa_mk_value(seq400
[10]));
1674 mt_set_non_kernel(0);
/*
 * check_node_overwrite() - store "max" ranges of i*100..i*100+50, then
 * overwrite a large span (319951-367950) with NULL to exercise a wide
 * multi-node overwrite.
 * NOTE(review): original line numbers jump (1677->1681, 1682->1684): the
 * declarations of "i" and "max" (and likely a trailing mt_validate()) were
 * dropped by extraction.  Code preserved verbatim.
 */
1677 static noinline
void __init
check_node_overwrite(struct maple_tree
*mt
)
1681 for (i
= 0; i
< max
; i
++)
1682 mtree_test_store_range(mt
, i
*100, i
*100 + 50, xa_mk_value(i
*100));
1684 mtree_test_store_range(mt
, 319951, 367950, NULL
);
1685 /*mt_dump(mt, mt_dump_dec); */
/*
 * bench_slot_store() - benchmark repeated single-slot overwrites: populate
 * ranges 0..max step 10, then "count" iterations of clearing index "brk"
 * and re-storing brk_start..brk.  Compiled only under BENCH_SLOT_STORE.
 * NOTE(review): the loop-closing braces and the GFP argument continuation
 * of the final store were dropped by extraction; code preserved verbatim.
 */
1689 #if defined(BENCH_SLOT_STORE)
1690 static noinline
void __init
bench_slot_store(struct maple_tree
*mt
)
1692 int i
, brk
= 105, max
= 1040, brk_start
= 100, count
= 20000000;
1694 for (i
= 0; i
< max
; i
+= 10)
1695 mtree_store_range(mt
, i
, i
+ 5, xa_mk_value(i
), GFP_KERNEL
);
1697 for (i
= 0; i
< count
; i
++) {
1698 mtree_store_range(mt
, brk
, brk
, NULL
, GFP_KERNEL
);
1699 mtree_store_range(mt
, brk_start
, brk
, xa_mk_value(brk
),
/*
 * bench_node_store() - benchmark overwrites spanning several slots
 * (overwrite..overwrite+15), sliding "overwrite" each iteration and
 * wrapping once it reaches 135.  Compiled only under BENCH_NODE_STORE.
 * NOTE(review): lines between the embedded numbers 1715 and 1718 (the
 * "overwrite += ..." step and the wrap-around reset) were dropped by
 * extraction; code preserved verbatim.
 */
1705 #if defined(BENCH_NODE_STORE)
1706 static noinline
void __init
bench_node_store(struct maple_tree
*mt
)
1708 int i
, overwrite
= 76, max
= 240, count
= 20000000;
1710 for (i
= 0; i
< max
; i
+= 10)
1711 mtree_store_range(mt
, i
, i
+ 5, xa_mk_value(i
), GFP_KERNEL
);
1713 for (i
= 0; i
< count
; i
++) {
1714 mtree_store_range(mt
, overwrite
, overwrite
+ 15,
1715 xa_mk_value(overwrite
), GFP_KERNEL
);
1718 if (overwrite
>= 135)
/*
 * bench_awalk() - benchmark the gap-search walk: carve a hole at 1470-1475
 * and repeatedly run mas_empty_area_rev() over 0-2000 for size 10.
 * Compiled only under BENCH_AWALK.
 * NOTE(review): a mas reset between iterations (original line 1737 region)
 * appears to have been dropped by extraction; code preserved verbatim.
 */
1724 #if defined(BENCH_AWALK)
1725 static noinline
void __init
bench_awalk(struct maple_tree
*mt
)
1727 int i
, max
= 2500, count
= 50000000;
1728 MA_STATE(mas
, mt
, 1470, 1470);
1730 for (i
= 0; i
< max
; i
+= 10)
1731 mtree_store_range(mt
, i
, i
+ 5, xa_mk_value(i
), GFP_KERNEL
);
1733 mtree_store_range(mt
, 1470, 1475, NULL
, GFP_KERNEL
);
1735 for (i
= 0; i
< count
; i
++) {
1736 mas_empty_area_rev(&mas
, 0, 2000, 10);
/*
 * bench_walk() - benchmark mas_walk() on a populated tree; the loop body
 * (a mas_walk() at index 1470 plus a state reset) falls in the lines the
 * extraction dropped (original 1751-1755).  Compiled only under BENCH_WALK.
 * Code preserved verbatim.
 */
1741 #if defined(BENCH_WALK)
1742 static noinline
void __init
bench_walk(struct maple_tree
*mt
)
1744 int i
, max
= 2500, count
= 550000000;
1745 MA_STATE(mas
, mt
, 1470, 1470);
1747 for (i
= 0; i
< max
; i
+= 10)
1748 mtree_store_range(mt
, i
, i
+ 5, xa_mk_value(i
), GFP_KERNEL
);
1750 for (i
= 0; i
< count
; i
++) {
/*
 * bench_load() - benchmark the lockless read path: populate ranges then
 * call mtree_load(mt, 1470) "count" times.  Compiled only under BENCH_LOAD.
 */
1758 #if defined(BENCH_LOAD)
1759 static noinline
void __init
bench_load(struct maple_tree
*mt
)
1761 int i
, max
= 2500, count
= 550000000;
1763 for (i
= 0; i
< max
; i
+= 10)
1764 mtree_store_range(mt
, i
, i
+ 5, xa_mk_value(i
), GFP_KERNEL
);
1766 for (i
= 0; i
< count
; i
++)
1767 mtree_load(mt
, 1470);
/*
 * bench_mt_for_each() - benchmark the mt_for_each() iterator over ranges
 * i..i+4 (step 5), verifying each entry against a running expected value j.
 * Compiled only under BENCH_MT_FOR_EACH.
 * NOTE(review): the j advancement and the index reset between outer
 * iterations (original lines 1786-1790) were dropped by extraction; code
 * preserved verbatim.
 */
1771 #if defined(BENCH_MT_FOR_EACH)
1772 static noinline
void __init
bench_mt_for_each(struct maple_tree
*mt
)
1774 int i
, count
= 1000000;
1775 unsigned long max
= 2500, index
= 0;
1778 for (i
= 0; i
< max
; i
+= 5)
1779 mtree_store_range(mt
, i
, i
+ 4, xa_mk_value(i
), GFP_KERNEL
);
1781 for (i
= 0; i
< count
; i
++) {
1782 unsigned long j
= 0;
1784 mt_for_each(mt
, entry
, index
, max
) {
1785 MT_BUG_ON(mt
, entry
!= xa_mk_value(j
));
/*
 * bench_mas_for_each() - benchmark the mas_for_each() iterator.  Entries
 * are stored as i..i+gap; the computation of "gap" per iteration (original
 * lines 1804-1807) and the j advancement / mas reset (1817-1821) were
 * dropped by extraction.  Compiled only under BENCH_MAS_FOR_EACH.
 * Code preserved verbatim.
 */
1795 #if defined(BENCH_MAS_FOR_EACH)
1796 static noinline
void __init
bench_mas_for_each(struct maple_tree
*mt
)
1798 int i
, count
= 1000000;
1799 unsigned long max
= 2500;
1801 MA_STATE(mas
, mt
, 0, 0);
1803 for (i
= 0; i
< max
; i
+= 5) {
1808 mtree_store_range(mt
, i
, i
+ gap
, xa_mk_value(i
), GFP_KERNEL
);
1812 for (i
= 0; i
< count
; i
++) {
1813 unsigned long j
= 0;
1815 mas_for_each(&mas
, entry
, max
) {
1816 MT_BUG_ON(mt
, entry
!= xa_mk_value(j
));
/*
 * bench_mas_prev() - benchmark reverse iteration: from ULONG_MAX walk
 * backwards with mas_prev() checking entries against j counting down from
 * 2495.  The per-iteration "gap" computation and the j decrement (original
 * lines 1834-1837, 1848-1852) were dropped by extraction.  Compiled only
 * under BENCH_MAS_PREV.  Code preserved verbatim.
 */
1825 #if defined(BENCH_MAS_PREV)
1826 static noinline
void __init
bench_mas_prev(struct maple_tree
*mt
)
1828 int i
, count
= 1000000;
1829 unsigned long max
= 2500;
1831 MA_STATE(mas
, mt
, 0, 0);
1833 for (i
= 0; i
< max
; i
+= 5) {
1838 mtree_store_range(mt
, i
, i
+ gap
, xa_mk_value(i
), GFP_KERNEL
);
1842 for (i
= 0; i
< count
; i
++) {
1843 unsigned long j
= 2495;
1845 mas_set(&mas
, ULONG_MAX
);
1846 while ((entry
= mas_prev(&mas
, 0)) != NULL
) {
1847 MT_BUG_ON(mt
, entry
!= xa_mk_value(j
));
1855 /* check_forking - simulate the kernel forking sequence with the tree. */
/*
 * Builds a source tree of 135 ranges under an external rwsem, duplicates it
 * with __mt_dup() while holding the destination lock nested (matching the
 * mmap_lock ordering at fork), replays every entry with mas_store() into
 * the copy, then validates and destroys the duplicate.
 * NOTE(review): original line numbers jump (1881->1887, 1894->1896): the
 * __mt_dup() return-value check, mt_validate(&mt)/__mt_destroy(&mt) for the
 * source, and up_write(&mt_lock) were likely dropped by extraction.  Code
 * preserved verbatim.
 */
1856 static noinline
void __init
check_forking(void)
1858 struct maple_tree mt
, newmt
;
1859 int i
, nr_entries
= 134, ret
;
1861 MA_STATE(mas
, &mt
, 0, 0);
1862 MA_STATE(newmas
, &newmt
, 0, 0);
1863 struct rw_semaphore mt_lock
, newmt_lock
;
1865 init_rwsem(&mt_lock
);
1866 init_rwsem(&newmt_lock
);
/* Both trees use external locks, as an mm's maple tree does. */
1868 mt_init_flags(&mt
, MT_FLAGS_ALLOC_RANGE
| MT_FLAGS_LOCK_EXTERN
);
1869 mt_set_external_lock(&mt
, &mt_lock
);
1871 mt_init_flags(&newmt
, MT_FLAGS_ALLOC_RANGE
| MT_FLAGS_LOCK_EXTERN
);
1872 mt_set_external_lock(&newmt
, &newmt_lock
);
1874 down_write(&mt_lock
);
1875 for (i
= 0; i
<= nr_entries
; i
++) {
1876 mas_set_range(&mas
, i
*10, i
*10 + 5);
1877 mas_store_gfp(&mas
, xa_mk_value(i
), GFP_KERNEL
);
/* Nested lock class mirrors taking the child's lock under the parent's. */
1880 down_write_nested(&newmt_lock
, SINGLE_DEPTH_NESTING
);
1881 ret
= __mt_dup(&mt
, &newmt
, GFP_KERNEL
);
/* Replay all duplicated entries as a fork would re-link VMAs. */
1887 mas_set(&newmas
, 0);
1888 mas_for_each(&newmas
, val
, ULONG_MAX
)
1889 mas_store(&newmas
, val
);
1891 mas_destroy(&newmas
);
1893 mt_validate(&newmt
);
1894 __mt_destroy(&newmt
);
1896 up_write(&newmt_lock
);
/*
 * check_iteration() - verify mas_for_each()/mas_find() remain consistent
 * when the current entry is overwritten mid-iteration: overwriting the end
 * of an entry, the start of an entry, and the end of an entry while
 * advancing, must not skip or repeat entries.
 * NOTE(review): original line numbers jump (1910->1914, 1917->1921, ...):
 * the "void *val" declaration, i reset/increment lines, locking, and the
 * mas_set_range() overwrite setups inside each loop were dropped by
 * extraction.  Code preserved verbatim.
 */
1900 static noinline
void __init
check_iteration(struct maple_tree
*mt
)
1902 int i
, nr_entries
= 125;
1904 MA_STATE(mas
, mt
, 0, 0);
1906 for (i
= 0; i
<= nr_entries
; i
++)
1907 mtree_store_range(mt
, i
* 10, i
* 10 + 9,
1908 xa_mk_value(i
), GFP_KERNEL
);
1910 mt_set_non_kernel(99999);
/* Pass 1: overwrite the end of entry 92 while iterating up to 925. */
1914 mas_for_each(&mas
, val
, 925) {
1915 MT_BUG_ON(mt
, mas
.index
!= i
* 10);
1916 MT_BUG_ON(mt
, mas
.last
!= i
* 10 + 9);
1917 /* Overwrite end of entry 92 */
1921 mas_store(&mas
, val
);
1925 /* Ensure mas_find() gets the next value */
1926 val
= mas_find(&mas
, ULONG_MAX
);
1927 MT_BUG_ON(mt
, val
!= xa_mk_value(i
));
/* Pass 2: overwrite the start of entry 78 while iterating up to 785. */
1931 mas_for_each(&mas
, val
, 785) {
1932 MT_BUG_ON(mt
, mas
.index
!= i
* 10);
1933 MT_BUG_ON(mt
, mas
.last
!= i
* 10 + 9);
1934 /* Overwrite start of entry 78 */
1938 mas_store(&mas
, val
);
1943 val
= mas_find(&mas
, ULONG_MAX
);
1944 MT_BUG_ON(mt
, val
!= xa_mk_value(i
));
/* Pass 3: overwrite entry 76's end and run to the iteration limit. */
1948 mas_for_each(&mas
, val
, 765) {
1949 MT_BUG_ON(mt
, mas
.index
!= i
* 10);
1950 MT_BUG_ON(mt
, mas
.last
!= i
* 10 + 9);
1951 /* Overwrite end of entry 76 and advance to the end */
1955 mas_store(&mas
, val
);
1959 /* Make sure the next find returns the one after 765, 766-769 */
1960 val
= mas_find(&mas
, ULONG_MAX
);
1961 MT_BUG_ON(mt
, val
!= xa_mk_value(76));
1964 mt_set_non_kernel(0);
/*
 * check_mas_store_gfp() - copy a populated tree into a fresh one entry by
 * entry using mas_store_gfp(), then validate the copy.  Exercises the
 * allocating store path under mt_set_non_kernel() (forced allocation
 * failures permitted).
 * NOTE(review): original line numbers jump (1982->1987, 1990->1992): the
 * "void *val" declaration, mas locking, and loop braces were dropped by
 * extraction.  Code preserved verbatim.
 */
1967 static noinline
void __init
check_mas_store_gfp(struct maple_tree
*mt
)
1970 struct maple_tree newmt
;
1971 int i
, nr_entries
= 135;
1973 MA_STATE(mas
, mt
, 0, 0);
1974 MA_STATE(newmas
, mt
, 0, 0);
1976 for (i
= 0; i
<= nr_entries
; i
++)
1977 mtree_store_range(mt
, i
*10, i
*10 + 5,
1978 xa_mk_value(i
), GFP_KERNEL
);
1980 mt_set_non_kernel(99999);
1981 mt_init_flags(&newmt
, MT_FLAGS_ALLOC_RANGE
);
/* Repoint newmas at the destination tree (it was declared against mt). */
1982 newmas
.tree
= &newmt
;
/* Copy each source range verbatim into the new tree. */
1987 mas_for_each(&mas
, val
, ULONG_MAX
) {
1988 newmas
.index
= mas
.index
;
1989 newmas
.last
= mas
.last
;
1990 mas_store_gfp(&newmas
, val
, GFP_KERNEL
);
1992 mas_unlock(&newmas
);
1994 mt_validate(&newmt
);
1995 mt_set_non_kernel(0);
1996 mtree_destroy(&newmt
);
/*
 * bench_forking() - benchmark the fork duplication sequence: build a source
 * tree once, then nr_fork times duplicate it with __mt_dup() under the
 * nested external lock, replay entries, validate and destroy the copy.
 * Compiled only under BENCH_FORK.
 * NOTE(review): original line numbers jump (2027->2033, 2040->...): the
 * __mt_dup() error check and the source-tree teardown/up_write(&mt_lock)
 * were dropped by extraction.  Code preserved verbatim.
 */
1999 #if defined(BENCH_FORK)
2000 static noinline
void __init
bench_forking(void)
2002 struct maple_tree mt
, newmt
;
2003 int i
, nr_entries
= 134, nr_fork
= 80000, ret
;
2005 MA_STATE(mas
, &mt
, 0, 0);
2006 MA_STATE(newmas
, &newmt
, 0, 0);
2007 struct rw_semaphore mt_lock
, newmt_lock
;
2009 init_rwsem(&mt_lock
);
2010 init_rwsem(&newmt_lock
);
2012 mt_init_flags(&mt
, MT_FLAGS_ALLOC_RANGE
| MT_FLAGS_LOCK_EXTERN
);
2013 mt_set_external_lock(&mt
, &mt_lock
);
2015 down_write(&mt_lock
);
2016 for (i
= 0; i
<= nr_entries
; i
++) {
2017 mas_set_range(&mas
, i
*10, i
*10 + 5);
2018 mas_store_gfp(&mas
, xa_mk_value(i
), GFP_KERNEL
);
/* One full duplicate-replay-destroy cycle per simulated fork. */
2021 for (i
= 0; i
< nr_fork
; i
++) {
2022 mt_init_flags(&newmt
,
2023 MT_FLAGS_ALLOC_RANGE
| MT_FLAGS_LOCK_EXTERN
);
2024 mt_set_external_lock(&newmt
, &newmt_lock
);
2026 down_write_nested(&newmt_lock
, SINGLE_DEPTH_NESTING
);
2027 ret
= __mt_dup(&mt
, &newmt
, GFP_KERNEL
);
2033 mas_set(&newmas
, 0);
2034 mas_for_each(&newmas
, val
, ULONG_MAX
)
2035 mas_store(&newmas
, val
);
2037 mas_destroy(&newmas
);
2038 mt_validate(&newmt
);
2039 __mt_destroy(&newmt
);
2040 up_write(&newmt_lock
);
/*
 * next_prev_test() - exhaustive mas_next()/mas_prev()/mas_walk() state
 * checks: simple stepping, crossing node boundaries, crossing two tree
 * levels (level2_64/level2_32 index tables, presumably selected on
 * MAPLE_32BIT — the selection line is not visible here), running off the
 * end (index must land on last_index) and off the start (underflow state),
 * and prev over a gap after storing NULL.
 * NOTE(review): original line numbers jump throughout (2059->2064,
 * 2078->2085, 2200->2204, ...): declarations ("void *val", "i",
 * "nr_entries"), locking, mas_set() repositioning calls, the bodies of the
 * two early mas_for_each loops, and the tail mtree_destroy/re-init around
 * line 2214 were dropped by extraction.  Code preserved verbatim.
 */
2048 static noinline
void __init
next_prev_test(struct maple_tree
*mt
)
2052 MA_STATE(mas
, mt
, 0, 0);
2053 struct maple_enode
*mn
;
2054 static const unsigned long *level2
;
2055 static const unsigned long level2_64
[] = { 707, 1000, 710, 715, 720,
2057 static const unsigned long level2_32
[] = { 1747, 2000, 1750, 1755,
2059 unsigned long last_index
;
2064 last_index
= 0x138e;
2071 for (i
= 0; i
<= nr_entries
; i
++)
2072 mtree_store_range(mt
, i
*10, i
*10 + 5,
2073 xa_mk_value(i
), GFP_KERNEL
);
2076 for (i
= 0; i
<= nr_entries
/ 2; i
++) {
2077 mas_next(&mas
, 1000);
2078 if (mas_is_none(&mas
))
2085 mas_for_each(&mas
, val
, 1000) {
2092 mas_for_each(&mas
, val
, 1000) {
2098 * 680 - 685 = 0x61a00001930c
2100 * 690 - 695 = 0x61a00001930c
2101 * Check simple next/prev
2104 val
= mas_walk(&mas
);
2105 MT_BUG_ON(mt
, val
!= NULL
);
2107 val
= mas_next(&mas
, 1000);
2108 MT_BUG_ON(mt
, val
!= xa_mk_value(690 / 10));
2109 MT_BUG_ON(mt
, mas
.index
!= 690);
2110 MT_BUG_ON(mt
, mas
.last
!= 695);
2112 val
= mas_prev(&mas
, 0);
2113 MT_BUG_ON(mt
, val
!= xa_mk_value(680 / 10));
2114 MT_BUG_ON(mt
, mas
.index
!= 680);
2115 MT_BUG_ON(mt
, mas
.last
!= 685);
2117 val
= mas_next(&mas
, 1000);
2118 MT_BUG_ON(mt
, val
!= xa_mk_value(690 / 10));
2119 MT_BUG_ON(mt
, mas
.index
!= 690);
2120 MT_BUG_ON(mt
, mas
.last
!= 695);
2122 val
= mas_next(&mas
, 1000);
2123 MT_BUG_ON(mt
, val
!= xa_mk_value(700 / 10));
2124 MT_BUG_ON(mt
, mas
.index
!= 700);
2125 MT_BUG_ON(mt
, mas
.last
!= 705);
2127 /* Check across node boundaries of the tree */
2129 val
= mas_walk(&mas
);
2130 MT_BUG_ON(mt
, val
!= xa_mk_value(70 / 10));
2131 MT_BUG_ON(mt
, mas
.index
!= 70);
2132 MT_BUG_ON(mt
, mas
.last
!= 75);
2134 val
= mas_next(&mas
, 1000);
2135 MT_BUG_ON(mt
, val
!= xa_mk_value(80 / 10));
2136 MT_BUG_ON(mt
, mas
.index
!= 80);
2137 MT_BUG_ON(mt
, mas
.last
!= 85);
2139 val
= mas_prev(&mas
, 70);
2140 MT_BUG_ON(mt
, val
!= xa_mk_value(70 / 10));
2141 MT_BUG_ON(mt
, mas
.index
!= 70);
2142 MT_BUG_ON(mt
, mas
.last
!= 75);
2144 /* Check across two levels of the tree */
2146 mas_set(&mas
, level2
[0]);
2147 val
= mas_walk(&mas
);
2148 MT_BUG_ON(mt
, val
!= NULL
);
2149 val
= mas_next(&mas
, level2
[1]);
2150 MT_BUG_ON(mt
, val
!= xa_mk_value(level2
[2] / 10));
2151 MT_BUG_ON(mt
, mas
.index
!= level2
[2]);
2152 MT_BUG_ON(mt
, mas
.last
!= level2
[3]);
2155 val
= mas_next(&mas
, level2
[1]);
2156 MT_BUG_ON(mt
, val
!= xa_mk_value(level2
[4] / 10));
2157 MT_BUG_ON(mt
, mas
.index
!= level2
[4]);
2158 MT_BUG_ON(mt
, mas
.last
!= level2
[5]);
/* The step must have moved to a different node (mn snapshotted earlier). */
2159 MT_BUG_ON(mt
, mn
== mas
.node
);
2161 val
= mas_prev(&mas
, 0);
2162 MT_BUG_ON(mt
, val
!= xa_mk_value(level2
[2] / 10));
2163 MT_BUG_ON(mt
, mas
.index
!= level2
[2]);
2164 MT_BUG_ON(mt
, mas
.last
!= level2
[3]);
2166 /* Check running off the end and back on */
2167 mas_set(&mas
, nr_entries
* 10);
2168 val
= mas_walk(&mas
);
2169 MT_BUG_ON(mt
, val
!= xa_mk_value(nr_entries
));
2170 MT_BUG_ON(mt
, mas
.index
!= (nr_entries
* 10));
2171 MT_BUG_ON(mt
, mas
.last
!= (nr_entries
* 10 + 5));
2173 val
= mas_next(&mas
, ULONG_MAX
);
2174 MT_BUG_ON(mt
, val
!= NULL
);
2175 MT_BUG_ON(mt
, mas
.index
!= last_index
);
2176 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
2178 val
= mas_prev(&mas
, 0);
2179 MT_BUG_ON(mt
, val
!= xa_mk_value(nr_entries
));
2180 MT_BUG_ON(mt
, mas
.index
!= (nr_entries
* 10));
2181 MT_BUG_ON(mt
, mas
.last
!= (nr_entries
* 10 + 5));
2183 /* Check running off the start and back on */
2186 val
= mas_walk(&mas
);
2187 MT_BUG_ON(mt
, val
!= xa_mk_value(1));
2188 MT_BUG_ON(mt
, mas
.index
!= 10);
2189 MT_BUG_ON(mt
, mas
.last
!= 15);
2191 val
= mas_prev(&mas
, 0);
2192 MT_BUG_ON(mt
, val
!= xa_mk_value(0));
2193 MT_BUG_ON(mt
, mas
.index
!= 0);
2194 MT_BUG_ON(mt
, mas
.last
!= 5);
/* A second prev past index 0 must return NULL and leave mas underflowed. */
2196 val
= mas_prev(&mas
, 0);
2197 MT_BUG_ON(mt
, val
!= NULL
);
2198 MT_BUG_ON(mt
, mas
.index
!= 0);
2199 MT_BUG_ON(mt
, mas
.last
!= 5);
2200 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
2204 mas_store(&mas
, NULL
);
2209 val
= mas_prev(&mas
, 0);
2210 MT_BUG_ON(mt
, val
!= NULL
);
2211 MT_BUG_ON(mt
, mas
.index
!= 0);
2212 MT_BUG_ON(mt
, mas
.last
!= 9);
/* Two singletons with a gap: prev limited to 4 must not cross the gap. */
2218 mtree_store_range(mt
, 0, 0, xa_mk_value(0), GFP_KERNEL
);
2219 mtree_store_range(mt
, 5, 5, xa_mk_value(5), GFP_KERNEL
);
2222 val
= mas_prev(&mas
, 4);
2223 MT_BUG_ON(mt
, val
!= NULL
);
2229 /* Test spanning writes that require balancing right sibling or right cousin */
/*
 * Build 1001 ranges (i*10..i*10+5) then issue one wide NULL store over
 * 9365-9955, which spans multiple leaves near the right edge and forces a
 * rebalance against the right sibling/cousin.
 * NOTE(review): an mt_set_non_kernel() and/or mt_validate() around the
 * store (original lines 2238-2239, 2241+) may have been dropped by
 * extraction; code preserved verbatim.
 */
2230 static noinline
void __init
check_spanning_relatives(struct maple_tree
*mt
)
2233 unsigned long i
, nr_entries
= 1000;
2235 for (i
= 0; i
<= nr_entries
; i
++)
2236 mtree_store_range(mt
, i
*10, i
*10 + 5,
2237 xa_mk_value(i
), GFP_KERNEL
);
2240 mtree_store_range(mt
, 9365, 9955, NULL
, GFP_KERNEL
);
/*
 * check_fuzzer() - regression corpus of nine operation sequences originally
 * found by fuzzing, each of which previously crashed or corrupted the tree.
 * Each numbered section documents the bug it reproduces and the fix; the
 * tree is re-initialized (mt_init_flags(mt, 0)) between sections.
 * NOTE(review): embedded original line numbers jump between sections
 * (2258->2263, 2306->2310, 2349->2353, ...): the per-section
 * mtree_destroy() teardown calls and some comment lines were dropped by
 * extraction.  Operation sequences below are preserved verbatim — the
 * exact order and values are what reproduce the historical bugs; do not
 * "clean up" or deduplicate them.
 */
2243 static noinline
void __init
check_fuzzer(struct maple_tree
*mt
)
2246 * 1. Causes a spanning rebalance of a single root node.
2247 * Fixed by setting the correct limit in mast_cp_to_nodes() when the
2248 * entire right side is consumed.
2250 mtree_test_insert(mt
, 88, (void *)0xb1);
2251 mtree_test_insert(mt
, 84, (void *)0xa9);
2252 mtree_test_insert(mt
, 2, (void *)0x5);
2253 mtree_test_insert(mt
, 4, (void *)0x9);
2254 mtree_test_insert(mt
, 14, (void *)0x1d);
2255 mtree_test_insert(mt
, 7, (void *)0xf);
2256 mtree_test_insert(mt
, 12, (void *)0x19);
2257 mtree_test_insert(mt
, 18, (void *)0x25);
2258 mtree_test_store_range(mt
, 8, 18, (void *)0x11);
2263 * 2. Cause a spanning rebalance of two nodes in root.
2264 * Fixed by setting mast->r->max correctly.
2266 mt_init_flags(mt
, 0);
2267 mtree_test_store(mt
, 87, (void *)0xaf);
2268 mtree_test_store(mt
, 0, (void *)0x1);
2269 mtree_test_load(mt
, 4);
2270 mtree_test_insert(mt
, 4, (void *)0x9);
2271 mtree_test_store(mt
, 8, (void *)0x11);
2272 mtree_test_store(mt
, 44, (void *)0x59);
2273 mtree_test_store(mt
, 68, (void *)0x89);
2274 mtree_test_store(mt
, 2, (void *)0x5);
2275 mtree_test_insert(mt
, 43, (void *)0x57);
2276 mtree_test_insert(mt
, 24, (void *)0x31);
2277 mtree_test_insert(mt
, 844, (void *)0x699);
2278 mtree_test_store(mt
, 84, (void *)0xa9);
2279 mtree_test_store(mt
, 4, (void *)0x9);
2280 mtree_test_erase(mt
, 4);
2281 mtree_test_load(mt
, 5);
2282 mtree_test_erase(mt
, 0);
2286 * 3. Cause a node overflow on copy
2287 * Fixed by using the correct check for node size in mas_wr_modify()
2288 * Also discovered issue with metadata setting.
2290 mt_init_flags(mt
, 0);
2291 mtree_test_store_range(mt
, 0, ULONG_MAX
, (void *)0x1);
2292 mtree_test_store(mt
, 4, (void *)0x9);
2293 mtree_test_erase(mt
, 5);
2294 mtree_test_erase(mt
, 0);
2295 mtree_test_erase(mt
, 4);
2296 mtree_test_store(mt
, 5, (void *)0xb);
2297 mtree_test_erase(mt
, 5);
2298 mtree_test_store(mt
, 5, (void *)0xb);
2299 mtree_test_erase(mt
, 5);
2300 mtree_test_erase(mt
, 4);
2301 mtree_test_store(mt
, 4, (void *)0x9);
2302 mtree_test_store(mt
, 444, (void *)0x379);
2303 mtree_test_store(mt
, 0, (void *)0x1);
2304 mtree_test_load(mt
, 0);
2305 mtree_test_store(mt
, 5, (void *)0xb);
2306 mtree_test_erase(mt
, 0);
2310 * 4. spanning store failure due to writing incorrect pivot value at
2312 * Fixed by setting mast->r->max correctly in mast_cp_to_nodes()
2315 mt_init_flags(mt
, 0);
2316 mtree_test_insert(mt
, 261, (void *)0x20b);
2317 mtree_test_store(mt
, 516, (void *)0x409);
2318 mtree_test_store(mt
, 6, (void *)0xd);
2319 mtree_test_insert(mt
, 5, (void *)0xb);
2320 mtree_test_insert(mt
, 1256, (void *)0x9d1);
2321 mtree_test_store(mt
, 4, (void *)0x9);
2322 mtree_test_erase(mt
, 1);
2323 mtree_test_store(mt
, 56, (void *)0x71);
2324 mtree_test_insert(mt
, 1, (void *)0x3);
2325 mtree_test_store(mt
, 24, (void *)0x31);
2326 mtree_test_erase(mt
, 1);
2327 mtree_test_insert(mt
, 2263, (void *)0x11af);
2328 mtree_test_insert(mt
, 446, (void *)0x37d);
2329 mtree_test_store_range(mt
, 6, 45, (void *)0xd);
2330 mtree_test_store_range(mt
, 3, 446, (void *)0x7);
2334 * 5. mas_wr_extend_null() may overflow slots.
2335 * Fix by checking against wr_mas->node_end.
2337 mt_init_flags(mt
, 0);
2338 mtree_test_store(mt
, 48, (void *)0x61);
2339 mtree_test_store(mt
, 3, (void *)0x7);
2340 mtree_test_load(mt
, 0);
2341 mtree_test_store(mt
, 88, (void *)0xb1);
2342 mtree_test_store(mt
, 81, (void *)0xa3);
2343 mtree_test_insert(mt
, 0, (void *)0x1);
2344 mtree_test_insert(mt
, 8, (void *)0x11);
2345 mtree_test_insert(mt
, 4, (void *)0x9);
2346 mtree_test_insert(mt
, 2480, (void *)0x1361);
2347 mtree_test_insert(mt
, ULONG_MAX
,
2348 (void *)0xffffffffffffffff);
2349 mtree_test_erase(mt
, ULONG_MAX
);
2353 * 6. When reusing a node with an implied pivot and the node is
2354 * shrinking, old data would be left in the implied slot
2355 * Fixed by checking the last pivot for the mas->max and clear
2356 * accordingly. This only affected the left-most node as that node is
2357 * the only one allowed to end in NULL.
2359 mt_init_flags(mt
, 0);
2360 mtree_test_erase(mt
, 3);
2361 mtree_test_insert(mt
, 22, (void *)0x2d);
2362 mtree_test_insert(mt
, 15, (void *)0x1f);
2363 mtree_test_load(mt
, 2);
2364 mtree_test_insert(mt
, 1, (void *)0x3);
2365 mtree_test_insert(mt
, 1, (void *)0x3);
2366 mtree_test_insert(mt
, 5, (void *)0xb);
2367 mtree_test_erase(mt
, 1);
2368 mtree_test_insert(mt
, 1, (void *)0x3);
2369 mtree_test_insert(mt
, 4, (void *)0x9);
2370 mtree_test_insert(mt
, 1, (void *)0x3);
2371 mtree_test_erase(mt
, 1);
2372 mtree_test_insert(mt
, 2, (void *)0x5);
2373 mtree_test_insert(mt
, 1, (void *)0x3);
2374 mtree_test_erase(mt
, 3);
2375 mtree_test_insert(mt
, 22, (void *)0x2d);
2376 mtree_test_insert(mt
, 15, (void *)0x1f);
2377 mtree_test_insert(mt
, 2, (void *)0x5);
2378 mtree_test_insert(mt
, 1, (void *)0x3);
2379 mtree_test_insert(mt
, 8, (void *)0x11);
2380 mtree_test_load(mt
, 2);
2381 mtree_test_insert(mt
, 1, (void *)0x3);
2382 mtree_test_insert(mt
, 1, (void *)0x3);
2383 mtree_test_store(mt
, 1, (void *)0x3);
2384 mtree_test_insert(mt
, 5, (void *)0xb);
2385 mtree_test_erase(mt
, 1);
2386 mtree_test_insert(mt
, 1, (void *)0x3);
2387 mtree_test_insert(mt
, 4, (void *)0x9);
2388 mtree_test_insert(mt
, 1, (void *)0x3);
2389 mtree_test_erase(mt
, 1);
2390 mtree_test_insert(mt
, 2, (void *)0x5);
2391 mtree_test_insert(mt
, 1, (void *)0x3);
2392 mtree_test_erase(mt
, 3);
2393 mtree_test_insert(mt
, 22, (void *)0x2d);
2394 mtree_test_insert(mt
, 15, (void *)0x1f);
2395 mtree_test_insert(mt
, 2, (void *)0x5);
2396 mtree_test_insert(mt
, 1, (void *)0x3);
2397 mtree_test_insert(mt
, 8, (void *)0x11);
2398 mtree_test_insert(mt
, 12, (void *)0x19);
2399 mtree_test_erase(mt
, 1);
2400 mtree_test_store_range(mt
, 4, 62, (void *)0x9);
2401 mtree_test_erase(mt
, 62);
2402 mtree_test_store_range(mt
, 1, 0, (void *)0x3);
2403 mtree_test_insert(mt
, 11, (void *)0x17);
2404 mtree_test_insert(mt
, 3, (void *)0x7);
2405 mtree_test_insert(mt
, 3, (void *)0x7);
2406 mtree_test_store(mt
, 62, (void *)0x7d);
2407 mtree_test_erase(mt
, 62);
2408 mtree_test_store_range(mt
, 1, 15, (void *)0x3);
2409 mtree_test_erase(mt
, 1);
2410 mtree_test_insert(mt
, 22, (void *)0x2d);
2411 mtree_test_insert(mt
, 12, (void *)0x19);
2412 mtree_test_erase(mt
, 1);
2413 mtree_test_insert(mt
, 3, (void *)0x7);
2414 mtree_test_store(mt
, 62, (void *)0x7d);
2415 mtree_test_erase(mt
, 62);
2416 mtree_test_insert(mt
, 122, (void *)0xf5);
2417 mtree_test_store(mt
, 3, (void *)0x7);
2418 mtree_test_insert(mt
, 0, (void *)0x1);
2419 mtree_test_store_range(mt
, 0, 1, (void *)0x1);
2420 mtree_test_insert(mt
, 85, (void *)0xab);
2421 mtree_test_insert(mt
, 72, (void *)0x91);
2422 mtree_test_insert(mt
, 81, (void *)0xa3);
2423 mtree_test_insert(mt
, 726, (void *)0x5ad);
2424 mtree_test_insert(mt
, 0, (void *)0x1);
2425 mtree_test_insert(mt
, 1, (void *)0x3);
2426 mtree_test_store(mt
, 51, (void *)0x67);
2427 mtree_test_insert(mt
, 611, (void *)0x4c7);
2428 mtree_test_insert(mt
, 485, (void *)0x3cb);
2429 mtree_test_insert(mt
, 1, (void *)0x3);
2430 mtree_test_erase(mt
, 1);
2431 mtree_test_insert(mt
, 0, (void *)0x1);
2432 mtree_test_insert(mt
, 1, (void *)0x3);
2433 mtree_test_insert_range(mt
, 26, 1, (void *)0x35);
2434 mtree_test_load(mt
, 1);
2435 mtree_test_store_range(mt
, 1, 22, (void *)0x3);
2436 mtree_test_insert(mt
, 1, (void *)0x3);
2437 mtree_test_erase(mt
, 1);
2438 mtree_test_load(mt
, 53);
2439 mtree_test_load(mt
, 1);
2440 mtree_test_store_range(mt
, 1, 1, (void *)0x3);
2441 mtree_test_insert(mt
, 222, (void *)0x1bd);
2442 mtree_test_insert(mt
, 485, (void *)0x3cb);
2443 mtree_test_insert(mt
, 1, (void *)0x3);
2444 mtree_test_erase(mt
, 1);
2445 mtree_test_load(mt
, 0);
2446 mtree_test_insert(mt
, 21, (void *)0x2b);
2447 mtree_test_insert(mt
, 3, (void *)0x7);
2448 mtree_test_store(mt
, 621, (void *)0x4db);
2449 mtree_test_insert(mt
, 0, (void *)0x1);
2450 mtree_test_erase(mt
, 5);
2451 mtree_test_insert(mt
, 1, (void *)0x3);
2452 mtree_test_store(mt
, 62, (void *)0x7d);
2453 mtree_test_erase(mt
, 62);
2454 mtree_test_store_range(mt
, 1, 0, (void *)0x3);
2455 mtree_test_insert(mt
, 22, (void *)0x2d);
2456 mtree_test_insert(mt
, 12, (void *)0x19);
2457 mtree_test_erase(mt
, 1);
2458 mtree_test_insert(mt
, 1, (void *)0x3);
2459 mtree_test_store_range(mt
, 4, 62, (void *)0x9);
2460 mtree_test_erase(mt
, 62);
2461 mtree_test_erase(mt
, 1);
2462 mtree_test_load(mt
, 1);
2463 mtree_test_store_range(mt
, 1, 22, (void *)0x3);
2464 mtree_test_insert(mt
, 1, (void *)0x3);
2465 mtree_test_erase(mt
, 1);
2466 mtree_test_load(mt
, 53);
2467 mtree_test_load(mt
, 1);
2468 mtree_test_store_range(mt
, 1, 1, (void *)0x3);
2469 mtree_test_insert(mt
, 222, (void *)0x1bd);
2470 mtree_test_insert(mt
, 485, (void *)0x3cb);
2471 mtree_test_insert(mt
, 1, (void *)0x3);
2472 mtree_test_erase(mt
, 1);
2473 mtree_test_insert(mt
, 1, (void *)0x3);
2474 mtree_test_load(mt
, 0);
2475 mtree_test_load(mt
, 0);
2479 * 7. Previous fix was incomplete, fix mas_resuse_node() clearing of old
2480 * data by overwriting it first - that way metadata is of no concern.
2482 mt_init_flags(mt
, 0);
2483 mtree_test_load(mt
, 1);
2484 mtree_test_insert(mt
, 102, (void *)0xcd);
2485 mtree_test_erase(mt
, 2);
2486 mtree_test_erase(mt
, 0);
2487 mtree_test_load(mt
, 0);
2488 mtree_test_insert(mt
, 4, (void *)0x9);
2489 mtree_test_insert(mt
, 2, (void *)0x5);
2490 mtree_test_insert(mt
, 110, (void *)0xdd);
2491 mtree_test_insert(mt
, 1, (void *)0x3);
2492 mtree_test_insert_range(mt
, 5, 0, (void *)0xb);
2493 mtree_test_erase(mt
, 2);
2494 mtree_test_store(mt
, 0, (void *)0x1);
2495 mtree_test_store(mt
, 112, (void *)0xe1);
2496 mtree_test_insert(mt
, 21, (void *)0x2b);
2497 mtree_test_store(mt
, 1, (void *)0x3);
2498 mtree_test_insert_range(mt
, 110, 2, (void *)0xdd);
2499 mtree_test_store(mt
, 2, (void *)0x5);
2500 mtree_test_load(mt
, 22);
2501 mtree_test_erase(mt
, 2);
2502 mtree_test_store(mt
, 210, (void *)0x1a5);
2503 mtree_test_store_range(mt
, 0, 2, (void *)0x1);
2504 mtree_test_store(mt
, 2, (void *)0x5);
2505 mtree_test_erase(mt
, 2);
2506 mtree_test_erase(mt
, 22);
2507 mtree_test_erase(mt
, 1);
2508 mtree_test_erase(mt
, 2);
2509 mtree_test_store(mt
, 0, (void *)0x1);
2510 mtree_test_load(mt
, 112);
2511 mtree_test_insert(mt
, 2, (void *)0x5);
2512 mtree_test_erase(mt
, 2);
2513 mtree_test_store(mt
, 1, (void *)0x3);
2514 mtree_test_insert_range(mt
, 1, 2, (void *)0x3);
2515 mtree_test_erase(mt
, 0);
2516 mtree_test_erase(mt
, 2);
2517 mtree_test_store(mt
, 2, (void *)0x5);
2518 mtree_test_erase(mt
, 0);
2519 mtree_test_erase(mt
, 2);
2520 mtree_test_store(mt
, 0, (void *)0x1);
2521 mtree_test_store(mt
, 0, (void *)0x1);
2522 mtree_test_erase(mt
, 2);
2523 mtree_test_store(mt
, 2, (void *)0x5);
2524 mtree_test_erase(mt
, 2);
2525 mtree_test_insert(mt
, 2, (void *)0x5);
2526 mtree_test_insert_range(mt
, 1, 2, (void *)0x3);
2527 mtree_test_erase(mt
, 0);
2528 mtree_test_erase(mt
, 2);
2529 mtree_test_store(mt
, 0, (void *)0x1);
2530 mtree_test_load(mt
, 112);
2531 mtree_test_store_range(mt
, 110, 12, (void *)0xdd);
2532 mtree_test_store(mt
, 2, (void *)0x5);
2533 mtree_test_load(mt
, 110);
2534 mtree_test_insert_range(mt
, 4, 71, (void *)0x9);
2535 mtree_test_load(mt
, 2);
2536 mtree_test_store(mt
, 2, (void *)0x5);
2537 mtree_test_insert_range(mt
, 11, 22, (void *)0x17);
2538 mtree_test_erase(mt
, 12);
2539 mtree_test_store(mt
, 2, (void *)0x5);
2540 mtree_test_load(mt
, 22);
2545 * 8. When rebalancing or spanning_rebalance(), the max of the new node
2546 * may be set incorrectly to the final pivot and not the right max.
2547 * Fix by setting the left max to orig right max if the entire node is
2550 mt_init_flags(mt
, 0);
2551 mtree_test_store(mt
, 6, (void *)0xd);
2552 mtree_test_store(mt
, 67, (void *)0x87);
2553 mtree_test_insert(mt
, 15, (void *)0x1f);
2554 mtree_test_insert(mt
, 6716, (void *)0x3479);
2555 mtree_test_store(mt
, 61, (void *)0x7b);
2556 mtree_test_insert(mt
, 13, (void *)0x1b);
2557 mtree_test_store(mt
, 8, (void *)0x11);
2558 mtree_test_insert(mt
, 1, (void *)0x3);
2559 mtree_test_load(mt
, 0);
2560 mtree_test_erase(mt
, 67167);
2561 mtree_test_insert_range(mt
, 6, 7167, (void *)0xd);
2562 mtree_test_insert(mt
, 6, (void *)0xd);
2563 mtree_test_erase(mt
, 67);
2564 mtree_test_insert(mt
, 1, (void *)0x3);
2565 mtree_test_erase(mt
, 667167);
2566 mtree_test_insert(mt
, 6, (void *)0xd);
2567 mtree_test_store(mt
, 67, (void *)0x87);
2568 mtree_test_insert(mt
, 5, (void *)0xb);
2569 mtree_test_erase(mt
, 1);
2570 mtree_test_insert(mt
, 6, (void *)0xd);
2571 mtree_test_erase(mt
, 67);
2572 mtree_test_insert(mt
, 15, (void *)0x1f);
2573 mtree_test_insert(mt
, 67167, (void *)0x20cbf);
2574 mtree_test_insert(mt
, 1, (void *)0x3);
2575 mtree_test_load(mt
, 7);
2576 mtree_test_insert(mt
, 16, (void *)0x21);
2577 mtree_test_insert(mt
, 36, (void *)0x49);
2578 mtree_test_store(mt
, 67, (void *)0x87);
2579 mtree_test_store(mt
, 6, (void *)0xd);
2580 mtree_test_insert(mt
, 367, (void *)0x2df);
2581 mtree_test_insert(mt
, 115, (void *)0xe7);
2582 mtree_test_store(mt
, 0, (void *)0x1);
2583 mtree_test_store_range(mt
, 1, 3, (void *)0x3);
2584 mtree_test_store(mt
, 1, (void *)0x3);
2585 mtree_test_erase(mt
, 67167);
2586 mtree_test_insert_range(mt
, 6, 47, (void *)0xd);
2587 mtree_test_store(mt
, 1, (void *)0x3);
2588 mtree_test_insert_range(mt
, 1, 67, (void *)0x3);
2589 mtree_test_load(mt
, 67);
2590 mtree_test_insert(mt
, 1, (void *)0x3);
2591 mtree_test_erase(mt
, 67167);
2595 * 9. spanning store to the end of data caused an invalid metadata
2596 * length which resulted in a crash eventually.
2597 * Fix by checking if there is a value in pivot before incrementing the
2598 * metadata end in mab_mas_cp(). To ensure this doesn't happen again,
2599 * abstract the two locations this happens into a function called
2600 * mas_leaf_set_meta().
2602 mt_init_flags(mt
, 0);
2603 mtree_test_insert(mt
, 21, (void *)0x2b);
2604 mtree_test_insert(mt
, 12, (void *)0x19);
2605 mtree_test_insert(mt
, 6, (void *)0xd);
2606 mtree_test_insert(mt
, 8, (void *)0x11);
2607 mtree_test_insert(mt
, 2, (void *)0x5);
2608 mtree_test_insert(mt
, 91, (void *)0xb7);
2609 mtree_test_insert(mt
, 18, (void *)0x25);
2610 mtree_test_insert(mt
, 81, (void *)0xa3);
2611 mtree_test_store_range(mt
, 0, 128, (void *)0x1);
2612 mtree_test_store(mt
, 1, (void *)0x3);
2613 mtree_test_erase(mt
, 8);
2614 mtree_test_insert(mt
, 11, (void *)0x17);
2615 mtree_test_insert(mt
, 8, (void *)0x11);
2616 mtree_test_insert(mt
, 21, (void *)0x2b);
2617 mtree_test_insert(mt
, 2, (void *)0x5);
2618 mtree_test_insert(mt
, ULONG_MAX
- 10, (void *)0xffffffffffffffeb);
2619 mtree_test_erase(mt
, ULONG_MAX
- 10);
2620 mtree_test_store_range(mt
, 0, 281, (void *)0x1);
2621 mtree_test_erase(mt
, 2);
2622 mtree_test_insert(mt
, 1211, (void *)0x977);
2623 mtree_test_insert(mt
, 111, (void *)0xdf);
2624 mtree_test_insert(mt
, 13, (void *)0x1b);
2625 mtree_test_insert(mt
, 211, (void *)0x1a7);
2626 mtree_test_insert(mt
, 11, (void *)0x17);
2627 mtree_test_insert(mt
, 5, (void *)0xb);
2628 mtree_test_insert(mt
, 1218, (void *)0x985);
2629 mtree_test_insert(mt
, 61, (void *)0x7b);
2630 mtree_test_store(mt
, 1, (void *)0x3);
2631 mtree_test_insert(mt
, 121, (void *)0xf3);
2632 mtree_test_insert(mt
, 8, (void *)0x11);
2633 mtree_test_insert(mt
, 21, (void *)0x2b);
2634 mtree_test_insert(mt
, 2, (void *)0x5);
2635 mtree_test_insert(mt
, ULONG_MAX
- 10, (void *)0xffffffffffffffeb);
2636 mtree_test_erase(mt
, ULONG_MAX
- 10);
2639 /* duplicate the tree with a specific gap */
/*
 * Builds nr_entries ranges i*10..(i+1)*10-gap (so each entry is followed by
 * a "gap"-sized hole), then copies them into a fresh externally-locked tree
 * using mas_expected_entries() bulk-store preallocation plus mas_store().
 * NOTE(review): original line numbers jump (2641->2644, 2653->2658,
 * 2674->2677): the "gap" parameter declaration, "ret"/"tmp" declarations,
 * the zero_start handling (i presumably starts at 1 when !zero_start —
 * confirm against pristine source), locking, and an mt_validate() before
 * __mt_destroy() were dropped by extraction.  Code preserved verbatim.
 */
2640 static noinline
void __init
check_dup_gaps(struct maple_tree
*mt
,
2641 unsigned long nr_entries
, bool zero_start
,
2644 unsigned long i
= 0;
2645 struct maple_tree newmt
;
2648 MA_STATE(mas
, mt
, 0, 0);
2649 MA_STATE(newmas
, &newmt
, 0, 0);
2650 struct rw_semaphore newmt_lock
;
2652 init_rwsem(&newmt_lock
);
2653 mt_set_external_lock(&newmt
, &newmt_lock
);
2658 mt_zero_nr_tallocated();
2659 for (; i
<= nr_entries
; i
++)
2660 mtree_store_range(mt
, i
*10, (i
+1)*10 - gap
,
2661 xa_mk_value(i
), GFP_KERNEL
);
2663 mt_init_flags(&newmt
, MT_FLAGS_ALLOC_RANGE
| MT_FLAGS_LOCK_EXTERN
);
2664 mt_set_non_kernel(99999);
2665 down_write(&newmt_lock
);
/* Preallocate nodes for a bulk insert of nr_entries entries. */
2666 ret
= mas_expected_entries(&newmas
, nr_entries
);
2667 mt_set_non_kernel(0);
2668 MT_BUG_ON(mt
, ret
!= 0);
2671 mas_for_each(&mas
, tmp
, ULONG_MAX
) {
2672 newmas
.index
= mas
.index
;
2673 newmas
.last
= mas
.last
;
2674 mas_store(&newmas
, tmp
);
2677 mas_destroy(&newmas
);
2679 __mt_destroy(&newmt
);
2680 up_write(&newmt_lock
);
2683 /* Duplicate many sizes of trees. Mainly to test expected entry values */
2684 static noinline
void __init
check_dup(struct maple_tree
*mt
)
2687 int big_start
= 100010;
2689 /* Check with a value at zero */
2690 for (i
= 10; i
< 1000; i
++) {
2691 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
2692 check_dup_gaps(mt
, i
, true, 5);
2699 /* Check with a value at zero, no gap */
2700 for (i
= 1000; i
< 2000; i
++) {
2701 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
2702 check_dup_gaps(mt
, i
, true, 0);
2709 /* Check with a value at zero and unreasonably large */
2710 for (i
= big_start
; i
< big_start
+ 10; i
++) {
2711 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
2712 check_dup_gaps(mt
, i
, true, 5);
2719 /* Small to medium size not starting at zero*/
2720 for (i
= 200; i
< 1000; i
++) {
2721 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
2722 check_dup_gaps(mt
, i
, false, 5);
2729 /* Unreasonably large not starting at zero*/
2730 for (i
= big_start
; i
< big_start
+ 10; i
++) {
2731 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
2732 check_dup_gaps(mt
, i
, false, 5);
2739 /* Check non-allocation tree not starting at zero */
2740 for (i
= 1500; i
< 3000; i
++) {
2741 mt_init_flags(mt
, 0);
2742 check_dup_gaps(mt
, i
, false, 5);
2751 /* Check non-allocation tree starting at zero */
2752 for (i
= 200; i
< 1000; i
++) {
2753 mt_init_flags(mt
, 0);
2754 check_dup_gaps(mt
, i
, true, 5);
2761 /* Unreasonably large */
2762 for (i
= big_start
+ 5; i
< big_start
+ 10; i
++) {
2763 mt_init_flags(mt
, 0);
2764 check_dup_gaps(mt
, i
, true, 5);
2772 static noinline
void __init
check_bnode_min_spanning(struct maple_tree
*mt
)
2775 MA_STATE(mas
, mt
, 0, 0);
2777 mt_set_non_kernel(9999);
2780 mas_set_range(&mas
, i
*10, i
*10+9);
2781 mas_store(&mas
, check_bnode_min_spanning
);
2784 mas_set_range(&mas
, 240, 509);
2785 mas_store(&mas
, NULL
);
2788 mt_set_non_kernel(0);
2791 static noinline
void __init
check_empty_area_window(struct maple_tree
*mt
)
2793 unsigned long i
, nr_entries
= 20;
2794 MA_STATE(mas
, mt
, 0, 0);
2796 for (i
= 1; i
<= nr_entries
; i
++)
2797 mtree_store_range(mt
, i
*10, i
*10 + 9,
2798 xa_mk_value(i
), GFP_KERNEL
);
2800 /* Create another hole besides the one at 0 */
2801 mtree_store_range(mt
, 160, 169, NULL
, GFP_KERNEL
);
2803 /* Check lower bounds that don't fit */
2805 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 5, 90, 10) != -EBUSY
);
2808 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 6, 90, 5) != -EBUSY
);
2810 /* Check lower bound that does fit */
2812 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 5, 90, 5) != 0);
2813 MT_BUG_ON(mt
, mas
.index
!= 5);
2814 MT_BUG_ON(mt
, mas
.last
!= 9);
2817 /* Check one gap that doesn't fit and one that does */
2820 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 5, 217, 9) != 0);
2821 MT_BUG_ON(mt
, mas
.index
!= 161);
2822 MT_BUG_ON(mt
, mas
.last
!= 169);
2824 /* Check one gap that does fit above the min */
2826 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 100, 218, 3) != 0);
2827 MT_BUG_ON(mt
, mas
.index
!= 216);
2828 MT_BUG_ON(mt
, mas
.last
!= 218);
2830 /* Check size that doesn't fit any gap */
2832 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 100, 218, 16) != -EBUSY
);
2835 * Check size that doesn't fit the lower end of the window but
2839 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 167, 200, 4) != -EBUSY
);
2842 * Check size that doesn't fit the upper end of the window but
2846 MT_BUG_ON(mt
, mas_empty_area_rev(&mas
, 100, 162, 4) != -EBUSY
);
2848 /* Check mas_empty_area forward */
2850 MT_BUG_ON(mt
, mas_empty_area(&mas
, 0, 100, 9) != 0);
2851 MT_BUG_ON(mt
, mas
.index
!= 0);
2852 MT_BUG_ON(mt
, mas
.last
!= 8);
2855 MT_BUG_ON(mt
, mas_empty_area(&mas
, 0, 100, 4) != 0);
2856 MT_BUG_ON(mt
, mas
.index
!= 0);
2857 MT_BUG_ON(mt
, mas
.last
!= 3);
2860 MT_BUG_ON(mt
, mas_empty_area(&mas
, 0, 100, 11) != -EBUSY
);
2863 MT_BUG_ON(mt
, mas_empty_area(&mas
, 5, 100, 6) != -EBUSY
);
2866 MT_BUG_ON(mt
, mas_empty_area(&mas
, 0, 8, 10) != -EINVAL
);
2869 mas_empty_area(&mas
, 100, 165, 3);
2872 MT_BUG_ON(mt
, mas_empty_area(&mas
, 100, 163, 6) != -EBUSY
);
2876 static noinline
void __init
check_empty_area_fill(struct maple_tree
*mt
)
2878 const unsigned long max
= 0x25D78000;
2881 MA_STATE(mas
, mt
, 0, 0);
2883 mt_set_non_kernel(99999);
2884 for (shift
= 12; shift
<= 16; shift
++) {
2890 MT_BUG_ON(mt
, mas_empty_area(&mas
, 0, max
, size
) != 0);
2891 MT_BUG_ON(mt
, mas
.last
!= mas
.index
+ size
- 1);
2892 mas_store_gfp(&mas
, (void *)size
, GFP_KERNEL
);
2898 /* No space left. */
2901 MT_BUG_ON(mt
, mas_empty_area(&mas
, 0, max
, size
) != -EBUSY
);
2904 /* Fill a depth 3 node to the maximum */
2905 for (unsigned long i
= 629440511; i
<= 629440800; i
+= 6)
2906 mtree_store_range(mt
, i
, i
+ 5, (void *)i
, GFP_KERNEL
);
2907 /* Make space in the second-last depth 4 node */
2908 mtree_erase(mt
, 631668735);
2909 /* Make space in the last depth 4 node */
2910 mtree_erase(mt
, 629506047);
2912 /* Search from just after the gap in the second-last depth 4 */
2914 MT_BUG_ON(mt
, mas_empty_area(&mas
, 629506048, 690000000, 0x5000) != 0);
2916 mt_set_non_kernel(0);
2920 * Check MAS_START, MAS_PAUSE, active (implied), and MAS_NONE transitions.
2922 * The table below shows the single entry tree (0-0 pointer) and normal tree
2925 * Function ENTRY Start Result index & last
2926 * ┬ ┬ ┬ ┬ ┬
2927 * │ │ │ │ └─ the final range
2928 * │ │ │ └─ The node value after execution
2929 * │ │ └─ The node value before execution
2930 * │ └─ If the entry exists or does not exist (DNE)
2931 * └─ The function name
2933 * Function ENTRY Start Result index & last
2936 * Single entry tree at 0-0
2937 * ------------------------
2938 * DNE MAS_START MAS_NONE 1 - oo
2939 * DNE MAS_PAUSE MAS_NONE 1 - oo
2940 * DNE MAS_ROOT MAS_NONE 1 - oo
2942 * DNE MAS_NONE MAS_ROOT 0
2944 * DNE MAS_NONE MAS_NONE 1 - oo
2948 * exists MAS_START active range
2949 * DNE MAS_START active set to last range
2950 * exists MAS_PAUSE active range
2951 * DNE MAS_PAUSE active set to last range
2952 * exists MAS_NONE active range
2953 * exists active active range
2954 * DNE active active set to last range
2955 * ERANGE active MAS_OVERFLOW last range
2957 * Function ENTRY Start Result index & last
2960 * Single entry tree at 0-0
2961 * ------------------------
2963 * exists MAS_START MAS_ROOT 0
2964 * exists MAS_PAUSE MAS_ROOT 0
2965 * exists MAS_NONE MAS_ROOT 0
2968 * DNE MAS_START MAS_NONE 0
2969 * DNE MAS_PAUSE MAS_NONE 0
2970 * DNE MAS_NONE MAS_NONE 0
2971 * DNE MAS_ROOT MAS_NONE 0
2975 * exists MAS_START active range
2976 * DNE MAS_START active set to min
2977 * exists MAS_PAUSE active range
2978 * DNE MAS_PAUSE active set to min
2979 * exists MAS_NONE active range
2980 * DNE MAS_NONE MAS_NONE set to min
2981 * any MAS_ROOT MAS_NONE 0
2982 * exists active active range
2983 * DNE active active last range
2984 * ERANGE active MAS_UNDERFLOW last range
2986 * Function ENTRY Start Result index & last
2988 * - at index or next
2989 * Single entry tree at 0-0
2990 * ------------------------
2992 * DNE MAS_START MAS_NONE 0
2993 * DNE MAS_PAUSE MAS_NONE 0
2994 * DNE MAS_ROOT MAS_NONE 0
2995 * DNE MAS_NONE MAS_NONE 1
2997 * exists MAS_START MAS_ROOT 0
2998 * exists MAS_PAUSE MAS_ROOT 0
2999 * exists MAS_NONE MAS_ROOT 0
3003 * exists MAS_START active range
3004 * DNE MAS_START active set to max
3005 * exists MAS_PAUSE active range
3006 * DNE MAS_PAUSE active set to max
3007 * exists MAS_NONE active range (start at last)
3008 * exists active active range
3009 * DNE active active last range (max < last)
3011 * Function ENTRY Start Result index & last
3013 * - at index or before
3014 * Single entry tree at 0-0
3015 * ------------------------
3017 * exists MAS_START MAS_ROOT 0
3018 * exists MAS_PAUSE MAS_ROOT 0
3019 * exists MAS_NONE MAS_ROOT 0
3021 * DNE MAS_START MAS_NONE 0
3022 * DNE MAS_PAUSE MAS_NONE 0
3023 * DNE MAS_NONE MAS_NONE 0
3024 * DNE MAS_ROOT MAS_NONE 0
3028 * exists MAS_START active range
3029 * DNE MAS_START active set to min
3030 * exists MAS_PAUSE active range
3031 * DNE MAS_PAUSE active set to min
3032 * exists MAS_NONE active range (start at index)
3033 * exists active active range
3034 * DNE active active last range (min > index)
3036 * Function ENTRY Start Result index & last
3039 * Single entry tree at 0-0
3040 * ------------------------
3042 * DNE MAS_START MAS_ROOT 1 - oo
3043 * DNE MAS_PAUSE MAS_ROOT 1 - oo
3044 * DNE MAS_NONE MAS_ROOT 1 - oo
3045 * DNE MAS_ROOT MAS_ROOT 1 - oo
3047 * exists MAS_START MAS_ROOT 0
3048 * exists MAS_PAUSE MAS_ROOT 0
3049 * exists MAS_NONE MAS_ROOT 0
3050 * exists MAS_ROOT MAS_ROOT 0
3054 * exists MAS_START active range
3055 * DNE MAS_START active range of NULL
3056 * exists MAS_PAUSE active range
3057 * DNE MAS_PAUSE active range of NULL
3058 * exists MAS_NONE active range
3059 * DNE MAS_NONE active range of NULL
3060 * exists active active range
3061 * DNE active active range of NULL
3064 static noinline
void __init
check_state_handling(struct maple_tree
*mt
)
3066 MA_STATE(mas
, mt
, 0, 0);
3067 void *entry
, *ptr
= (void *) 0x1234500;
3071 /* Check MAS_ROOT First */
3072 mtree_store_range(mt
, 0, 0, ptr
, GFP_KERNEL
);
3075 /* prev: Start -> underflow*/
3076 entry
= mas_prev(&mas
, 0);
3077 MT_BUG_ON(mt
, entry
!= NULL
);
3078 MT_BUG_ON(mt
, mas
.status
!= ma_underflow
);
3080 /* prev: Start -> root */
3082 entry
= mas_prev(&mas
, 0);
3083 MT_BUG_ON(mt
, entry
!= ptr
);
3084 MT_BUG_ON(mt
, mas
.index
!= 0);
3085 MT_BUG_ON(mt
, mas
.last
!= 0);
3086 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3088 /* prev: pause -> root */
3091 entry
= mas_prev(&mas
, 0);
3092 MT_BUG_ON(mt
, entry
!= ptr
);
3093 MT_BUG_ON(mt
, mas
.index
!= 0);
3094 MT_BUG_ON(mt
, mas
.last
!= 0);
3095 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3097 /* next: start -> none */
3099 entry
= mas_next(&mas
, ULONG_MAX
);
3100 MT_BUG_ON(mt
, mas
.index
!= 1);
3101 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3102 MT_BUG_ON(mt
, entry
!= NULL
);
3103 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3105 /* next: start -> none*/
3107 entry
= mas_next(&mas
, ULONG_MAX
);
3108 MT_BUG_ON(mt
, mas
.index
!= 1);
3109 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3110 MT_BUG_ON(mt
, entry
!= NULL
);
3111 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3113 /* find: start -> root */
3115 entry
= mas_find(&mas
, ULONG_MAX
);
3116 MT_BUG_ON(mt
, entry
!= ptr
);
3117 MT_BUG_ON(mt
, mas
.index
!= 0);
3118 MT_BUG_ON(mt
, mas
.last
!= 0);
3119 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3121 /* find: root -> none */
3122 entry
= mas_find(&mas
, ULONG_MAX
);
3123 MT_BUG_ON(mt
, entry
!= NULL
);
3124 MT_BUG_ON(mt
, mas
.index
!= 1);
3125 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3126 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3128 /* find: none -> none */
3129 entry
= mas_find(&mas
, ULONG_MAX
);
3130 MT_BUG_ON(mt
, entry
!= NULL
);
3131 MT_BUG_ON(mt
, mas
.index
!= 1);
3132 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3133 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3135 /* find: start -> none */
3137 entry
= mas_find(&mas
, ULONG_MAX
);
3138 MT_BUG_ON(mt
, entry
!= NULL
);
3139 MT_BUG_ON(mt
, mas
.index
!= 1);
3140 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3141 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3143 /* find_rev: none -> root */
3144 entry
= mas_find_rev(&mas
, 0);
3145 MT_BUG_ON(mt
, entry
!= ptr
);
3146 MT_BUG_ON(mt
, mas
.index
!= 0);
3147 MT_BUG_ON(mt
, mas
.last
!= 0);
3148 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3150 /* find_rev: start -> root */
3152 entry
= mas_find_rev(&mas
, 0);
3153 MT_BUG_ON(mt
, entry
!= ptr
);
3154 MT_BUG_ON(mt
, mas
.index
!= 0);
3155 MT_BUG_ON(mt
, mas
.last
!= 0);
3156 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3158 /* find_rev: root -> none */
3159 entry
= mas_find_rev(&mas
, 0);
3160 MT_BUG_ON(mt
, entry
!= NULL
);
3161 MT_BUG_ON(mt
, mas
.index
!= 0);
3162 MT_BUG_ON(mt
, mas
.last
!= 0);
3163 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3165 /* find_rev: none -> none */
3166 entry
= mas_find_rev(&mas
, 0);
3167 MT_BUG_ON(mt
, entry
!= NULL
);
3168 MT_BUG_ON(mt
, mas
.index
!= 0);
3169 MT_BUG_ON(mt
, mas
.last
!= 0);
3170 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3172 /* find_rev: start -> root */
3174 entry
= mas_find_rev(&mas
, 0);
3175 MT_BUG_ON(mt
, entry
!= ptr
);
3176 MT_BUG_ON(mt
, mas
.index
!= 0);
3177 MT_BUG_ON(mt
, mas
.last
!= 0);
3178 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3180 /* walk: start -> none */
3182 entry
= mas_walk(&mas
);
3183 MT_BUG_ON(mt
, entry
!= NULL
);
3184 MT_BUG_ON(mt
, mas
.index
!= 1);
3185 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3186 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3188 /* walk: pause -> none*/
3191 entry
= mas_walk(&mas
);
3192 MT_BUG_ON(mt
, entry
!= NULL
);
3193 MT_BUG_ON(mt
, mas
.index
!= 1);
3194 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3195 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3197 /* walk: none -> none */
3198 mas
.index
= mas
.last
= 10;
3199 entry
= mas_walk(&mas
);
3200 MT_BUG_ON(mt
, entry
!= NULL
);
3201 MT_BUG_ON(mt
, mas
.index
!= 1);
3202 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3203 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3205 /* walk: none -> none */
3206 entry
= mas_walk(&mas
);
3207 MT_BUG_ON(mt
, entry
!= NULL
);
3208 MT_BUG_ON(mt
, mas
.index
!= 1);
3209 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3210 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3212 /* walk: start -> root */
3214 entry
= mas_walk(&mas
);
3215 MT_BUG_ON(mt
, entry
!= ptr
);
3216 MT_BUG_ON(mt
, mas
.index
!= 0);
3217 MT_BUG_ON(mt
, mas
.last
!= 0);
3218 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3220 /* walk: pause -> root */
3223 entry
= mas_walk(&mas
);
3224 MT_BUG_ON(mt
, entry
!= ptr
);
3225 MT_BUG_ON(mt
, mas
.index
!= 0);
3226 MT_BUG_ON(mt
, mas
.last
!= 0);
3227 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3229 /* walk: none -> root */
3230 mas
.status
= ma_none
;
3231 entry
= mas_walk(&mas
);
3232 MT_BUG_ON(mt
, entry
!= ptr
);
3233 MT_BUG_ON(mt
, mas
.index
!= 0);
3234 MT_BUG_ON(mt
, mas
.last
!= 0);
3235 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3237 /* walk: root -> root */
3238 entry
= mas_walk(&mas
);
3239 MT_BUG_ON(mt
, entry
!= ptr
);
3240 MT_BUG_ON(mt
, mas
.index
!= 0);
3241 MT_BUG_ON(mt
, mas
.last
!= 0);
3242 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3244 /* walk: root -> none */
3246 entry
= mas_walk(&mas
);
3247 MT_BUG_ON(mt
, entry
!= NULL
);
3248 MT_BUG_ON(mt
, mas
.index
!= 1);
3249 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3250 MT_BUG_ON(mt
, mas
.status
!= ma_none
);
3252 /* walk: none -> root */
3253 mas
.index
= mas
.last
= 0;
3254 entry
= mas_walk(&mas
);
3255 MT_BUG_ON(mt
, entry
!= ptr
);
3256 MT_BUG_ON(mt
, mas
.index
!= 0);
3257 MT_BUG_ON(mt
, mas
.last
!= 0);
3258 MT_BUG_ON(mt
, mas
.status
!= ma_root
);
3262 /* Check when there is an actual node */
3263 mtree_store_range(mt
, 0, 0, NULL
, GFP_KERNEL
);
3264 mtree_store_range(mt
, 0x1000, 0x1500, ptr
, GFP_KERNEL
);
3265 mtree_store_range(mt
, 0x2000, 0x2500, ptr2
, GFP_KERNEL
);
3266 mtree_store_range(mt
, 0x3000, 0x3500, ptr3
, GFP_KERNEL
);
3270 /* next: start ->active */
3272 entry
= mas_next(&mas
, ULONG_MAX
);
3273 MT_BUG_ON(mt
, entry
!= ptr
);
3274 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3275 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3276 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3278 /* next: pause ->active */
3281 entry
= mas_next(&mas
, ULONG_MAX
);
3282 MT_BUG_ON(mt
, entry
!= ptr
);
3283 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3284 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3285 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3287 /* next: none ->active */
3288 mas
.index
= mas
.last
= 0;
3290 mas
.status
= ma_none
;
3291 entry
= mas_next(&mas
, ULONG_MAX
);
3292 MT_BUG_ON(mt
, entry
!= ptr
);
3293 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3294 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3295 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3297 /* next:active ->active (spanning limit) */
3298 entry
= mas_next(&mas
, 0x2100);
3299 MT_BUG_ON(mt
, entry
!= ptr2
);
3300 MT_BUG_ON(mt
, mas
.index
!= 0x2000);
3301 MT_BUG_ON(mt
, mas
.last
!= 0x2500);
3302 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3304 /* next:active -> overflow (limit reached) beyond data */
3305 entry
= mas_next(&mas
, 0x2999);
3306 MT_BUG_ON(mt
, entry
!= NULL
);
3307 MT_BUG_ON(mt
, mas
.index
!= 0x2501);
3308 MT_BUG_ON(mt
, mas
.last
!= 0x2fff);
3309 MT_BUG_ON(mt
, !mas_is_overflow(&mas
));
3311 /* next:overflow -> active (limit changed) */
3312 entry
= mas_next(&mas
, ULONG_MAX
);
3313 MT_BUG_ON(mt
, entry
!= ptr3
);
3314 MT_BUG_ON(mt
, mas
.index
!= 0x3000);
3315 MT_BUG_ON(mt
, mas
.last
!= 0x3500);
3316 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3318 /* next:active -> overflow (limit reached) */
3319 entry
= mas_next(&mas
, ULONG_MAX
);
3320 MT_BUG_ON(mt
, entry
!= NULL
);
3321 MT_BUG_ON(mt
, mas
.index
!= 0x3501);
3322 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3323 MT_BUG_ON(mt
, !mas_is_overflow(&mas
));
3325 /* next:overflow -> overflow */
3326 entry
= mas_next(&mas
, ULONG_MAX
);
3327 MT_BUG_ON(mt
, entry
!= NULL
);
3328 MT_BUG_ON(mt
, mas
.index
!= 0x3501);
3329 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3330 MT_BUG_ON(mt
, !mas_is_overflow(&mas
));
3332 /* prev:overflow -> active */
3333 entry
= mas_prev(&mas
, 0);
3334 MT_BUG_ON(mt
, entry
!= ptr3
);
3335 MT_BUG_ON(mt
, mas
.index
!= 0x3000);
3336 MT_BUG_ON(mt
, mas
.last
!= 0x3500);
3337 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3339 /* next: none -> active, skip value at location */
3341 entry
= mas_next(&mas
, ULONG_MAX
);
3342 mas
.status
= ma_none
;
3344 entry
= mas_next(&mas
, ULONG_MAX
);
3345 MT_BUG_ON(mt
, entry
!= ptr2
);
3346 MT_BUG_ON(mt
, mas
.index
!= 0x2000);
3347 MT_BUG_ON(mt
, mas
.last
!= 0x2500);
3348 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3350 /* prev:active ->active */
3351 entry
= mas_prev(&mas
, 0);
3352 MT_BUG_ON(mt
, entry
!= ptr
);
3353 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3354 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3355 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3357 /* prev:active -> underflow (span limit) */
3358 mas_next(&mas
, ULONG_MAX
);
3359 entry
= mas_prev(&mas
, 0x1200);
3360 MT_BUG_ON(mt
, entry
!= ptr
);
3361 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3362 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3363 MT_BUG_ON(mt
, !mas_is_active(&mas
)); /* spanning limit */
3364 entry
= mas_prev(&mas
, 0x1200); /* underflow */
3365 MT_BUG_ON(mt
, entry
!= NULL
);
3366 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3367 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3368 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
3370 /* prev:underflow -> underflow (lower limit) spanning end range */
3371 entry
= mas_prev(&mas
, 0x0100);
3372 MT_BUG_ON(mt
, entry
!= NULL
);
3373 MT_BUG_ON(mt
, mas
.index
!= 0);
3374 MT_BUG_ON(mt
, mas
.last
!= 0x0FFF);
3375 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
3377 /* prev:underflow -> underflow */
3378 entry
= mas_prev(&mas
, 0);
3379 MT_BUG_ON(mt
, entry
!= NULL
);
3380 MT_BUG_ON(mt
, mas
.index
!= 0);
3381 MT_BUG_ON(mt
, mas
.last
!= 0x0FFF);
3382 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
3384 /* prev:underflow -> underflow */
3385 entry
= mas_prev(&mas
, 0);
3386 MT_BUG_ON(mt
, entry
!= NULL
);
3387 MT_BUG_ON(mt
, mas
.index
!= 0);
3388 MT_BUG_ON(mt
, mas
.last
!= 0x0FFF);
3389 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
3391 /* next:underflow -> active */
3392 entry
= mas_next(&mas
, ULONG_MAX
);
3393 MT_BUG_ON(mt
, entry
!= ptr
);
3394 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3395 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3396 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3398 /* prev:first value -> underflow */
3399 entry
= mas_prev(&mas
, 0x1000);
3400 MT_BUG_ON(mt
, entry
!= NULL
);
3401 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3402 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3403 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
3405 /* find:underflow -> first value */
3406 entry
= mas_find(&mas
, ULONG_MAX
);
3407 MT_BUG_ON(mt
, entry
!= ptr
);
3408 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3409 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3410 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3412 /* prev: pause ->active */
3413 mas_set(&mas
, 0x3600);
3414 entry
= mas_prev(&mas
, 0);
3415 MT_BUG_ON(mt
, entry
!= ptr3
);
3417 entry
= mas_prev(&mas
, 0);
3418 MT_BUG_ON(mt
, entry
!= ptr2
);
3419 MT_BUG_ON(mt
, mas
.index
!= 0x2000);
3420 MT_BUG_ON(mt
, mas
.last
!= 0x2500);
3421 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3423 /* prev:active -> underflow spanning min */
3424 entry
= mas_prev(&mas
, 0x1600);
3425 MT_BUG_ON(mt
, entry
!= NULL
);
3426 MT_BUG_ON(mt
, mas
.index
!= 0x1501);
3427 MT_BUG_ON(mt
, mas
.last
!= 0x1FFF);
3428 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
3430 /* prev: active ->active, continue */
3431 entry
= mas_prev(&mas
, 0);
3432 MT_BUG_ON(mt
, entry
!= ptr
);
3433 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3434 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3435 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3437 /* find: start ->active */
3439 entry
= mas_find(&mas
, ULONG_MAX
);
3440 MT_BUG_ON(mt
, entry
!= ptr
);
3441 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3442 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3443 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3445 /* find: pause ->active */
3448 entry
= mas_find(&mas
, ULONG_MAX
);
3449 MT_BUG_ON(mt
, entry
!= ptr
);
3450 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3451 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3452 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3454 /* find: start ->active on value */;
3455 mas_set(&mas
, 1200);
3456 entry
= mas_find(&mas
, ULONG_MAX
);
3457 MT_BUG_ON(mt
, entry
!= ptr
);
3458 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3459 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3460 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3462 /* find:active ->active */
3463 entry
= mas_find(&mas
, ULONG_MAX
);
3464 MT_BUG_ON(mt
, entry
!= ptr2
);
3465 MT_BUG_ON(mt
, mas
.index
!= 0x2000);
3466 MT_BUG_ON(mt
, mas
.last
!= 0x2500);
3467 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3470 /* find:active -> active (NULL)*/
3471 entry
= mas_find(&mas
, 0x2700);
3472 MT_BUG_ON(mt
, entry
!= NULL
);
3473 MT_BUG_ON(mt
, mas
.index
!= 0x2501);
3474 MT_BUG_ON(mt
, mas
.last
!= 0x2FFF);
3475 MAS_BUG_ON(&mas
, !mas_is_active(&mas
));
3477 /* find: overflow ->active */
3478 entry
= mas_find(&mas
, 0x5000);
3479 MT_BUG_ON(mt
, entry
!= ptr3
);
3480 MT_BUG_ON(mt
, mas
.index
!= 0x3000);
3481 MT_BUG_ON(mt
, mas
.last
!= 0x3500);
3482 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3484 /* find:active -> active (NULL) end*/
3485 entry
= mas_find(&mas
, ULONG_MAX
);
3486 MT_BUG_ON(mt
, entry
!= NULL
);
3487 MT_BUG_ON(mt
, mas
.index
!= 0x3501);
3488 MT_BUG_ON(mt
, mas
.last
!= ULONG_MAX
);
3489 MAS_BUG_ON(&mas
, !mas_is_active(&mas
));
3491 /* find_rev: active (END) ->active */
3492 entry
= mas_find_rev(&mas
, 0);
3493 MT_BUG_ON(mt
, entry
!= ptr3
);
3494 MT_BUG_ON(mt
, mas
.index
!= 0x3000);
3495 MT_BUG_ON(mt
, mas
.last
!= 0x3500);
3496 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3498 /* find_rev:active ->active */
3499 entry
= mas_find_rev(&mas
, 0);
3500 MT_BUG_ON(mt
, entry
!= ptr2
);
3501 MT_BUG_ON(mt
, mas
.index
!= 0x2000);
3502 MT_BUG_ON(mt
, mas
.last
!= 0x2500);
3503 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3505 /* find_rev: pause ->active */
3507 entry
= mas_find_rev(&mas
, 0);
3508 MT_BUG_ON(mt
, entry
!= ptr
);
3509 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3510 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3511 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3513 /* find_rev:active -> underflow */
3514 entry
= mas_find_rev(&mas
, 0);
3515 MT_BUG_ON(mt
, entry
!= NULL
);
3516 MT_BUG_ON(mt
, mas
.index
!= 0);
3517 MT_BUG_ON(mt
, mas
.last
!= 0x0FFF);
3518 MT_BUG_ON(mt
, !mas_is_underflow(&mas
));
3520 /* find_rev: start ->active */
3521 mas_set(&mas
, 0x1200);
3522 entry
= mas_find_rev(&mas
, 0);
3523 MT_BUG_ON(mt
, entry
!= ptr
);
3524 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3525 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3526 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3528 /* mas_walk start ->active */
3529 mas_set(&mas
, 0x1200);
3530 entry
= mas_walk(&mas
);
3531 MT_BUG_ON(mt
, entry
!= ptr
);
3532 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3533 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3534 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3536 /* mas_walk start ->active */
3537 mas_set(&mas
, 0x1600);
3538 entry
= mas_walk(&mas
);
3539 MT_BUG_ON(mt
, entry
!= NULL
);
3540 MT_BUG_ON(mt
, mas
.index
!= 0x1501);
3541 MT_BUG_ON(mt
, mas
.last
!= 0x1fff);
3542 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3544 /* mas_walk pause ->active */
3545 mas_set(&mas
, 0x1200);
3547 entry
= mas_walk(&mas
);
3548 MT_BUG_ON(mt
, entry
!= ptr
);
3549 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3550 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3551 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3553 /* mas_walk pause -> active */
3554 mas_set(&mas
, 0x1600);
3556 entry
= mas_walk(&mas
);
3557 MT_BUG_ON(mt
, entry
!= NULL
);
3558 MT_BUG_ON(mt
, mas
.index
!= 0x1501);
3559 MT_BUG_ON(mt
, mas
.last
!= 0x1fff);
3560 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3562 /* mas_walk none -> active */
3563 mas_set(&mas
, 0x1200);
3564 mas
.status
= ma_none
;
3565 entry
= mas_walk(&mas
);
3566 MT_BUG_ON(mt
, entry
!= ptr
);
3567 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3568 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3569 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3571 /* mas_walk none -> active */
3572 mas_set(&mas
, 0x1600);
3573 mas
.status
= ma_none
;
3574 entry
= mas_walk(&mas
);
3575 MT_BUG_ON(mt
, entry
!= NULL
);
3576 MT_BUG_ON(mt
, mas
.index
!= 0x1501);
3577 MT_BUG_ON(mt
, mas
.last
!= 0x1fff);
3578 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3580 /* mas_walk active -> active */
3584 entry
= mas_walk(&mas
);
3585 MT_BUG_ON(mt
, entry
!= ptr
);
3586 MT_BUG_ON(mt
, mas
.index
!= 0x1000);
3587 MT_BUG_ON(mt
, mas
.last
!= 0x1500);
3588 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3590 /* mas_walk active -> active */
3593 entry
= mas_walk(&mas
);
3594 MT_BUG_ON(mt
, entry
!= NULL
);
3595 MT_BUG_ON(mt
, mas
.index
!= 0x1501);
3596 MT_BUG_ON(mt
, mas
.last
!= 0x1fff);
3597 MT_BUG_ON(mt
, !mas_is_active(&mas
));
3602 static noinline
void __init
alloc_cyclic_testing(struct maple_tree
*mt
)
3604 unsigned long location
;
3607 MA_STATE(mas
, mt
, 0, 0);
3611 for (int i
= 0; i
< 100; i
++) {
3612 mas_alloc_cyclic(&mas
, &location
, mt
, 2, ULONG_MAX
, &next
, GFP_KERNEL
);
3613 MAS_BUG_ON(&mas
, i
!= location
- 2);
3614 MAS_BUG_ON(&mas
, mas
.index
!= location
);
3615 MAS_BUG_ON(&mas
, mas
.last
!= location
);
3616 MAS_BUG_ON(&mas
, i
!= next
- 3);
3622 mt_init_flags(mt
, MT_FLAGS_ALLOC_RANGE
);
3623 for (int i
= 0; i
< 100; i
++) {
3624 mtree_alloc_cyclic(mt
, &location
, mt
, 2, ULONG_MAX
, &next
, GFP_KERNEL
);
3625 MT_BUG_ON(mt
, i
!= location
- 2);
3626 MT_BUG_ON(mt
, i
!= next
- 3);
3627 MT_BUG_ON(mt
, mtree_load(mt
, location
) != mt
);
3632 next
= ULONG_MAX
- 1;
3633 ret
= mtree_alloc_cyclic(mt
, &location
, mt
, 2, ULONG_MAX
, &next
, GFP_KERNEL
);
3634 MT_BUG_ON(mt
, ret
!= 0);
3635 ret
= mtree_alloc_cyclic(mt
, &location
, mt
, 2, ULONG_MAX
, &next
, GFP_KERNEL
);
3636 MT_BUG_ON(mt
, ret
!= 0);
3637 ret
= mtree_alloc_cyclic(mt
, &location
, mt
, 2, ULONG_MAX
, &next
, GFP_KERNEL
);
3638 MT_BUG_ON(mt
, ret
!= 1);
/* The single global tree reused (init, exercise, destroy) by every test. */
static DEFINE_MTREE(tree);
/*
 * maple_tree_seed() - Module init: run the maple tree test suite.
 *
 * Each test gets a freshly initialized global tree and destroys it
 * afterwards.  Returns 0 when every MT_BUG_ON() passed.
 *
 * NOTE(review): the extraction this block was recovered from dropped
 * structural lines throughout this function — the opening brace, the tail of
 * set[] (presumably "5003, 5002};" given the set[10]/set[11] uses below), the
 * "void *ptr" declaration, the #endif of every benchmark section, the bodies
 * of the BENCH_AWALK/BENCH_WALK/BENCH_LOAD/BENCH_FORK sections, and the final
 * return statements.  They must be restored from upstream; only the visible
 * code is documented here.
 */
static int __init maple_tree_seed(void)
	/* Index values used by the basic insert/load checks below. */
	unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
				1001, 1002, 1003, 1005, 0,

	pr_info("\nTEST STARTING\n\n");

	/* Optional micro-benchmarks; enabled via the #defines at file top. */
#if defined(BENCH_SLOT_STORE)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	bench_slot_store(&tree);
	mtree_destroy(&tree);
#if defined(BENCH_NODE_STORE)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	bench_node_store(&tree);
	mtree_destroy(&tree);
#if defined(BENCH_AWALK)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	/* NOTE(review): bench call dropped by extraction — restore upstream. */
	mtree_destroy(&tree);
#if defined(BENCH_WALK)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	/* NOTE(review): bench call dropped by extraction — restore upstream. */
	mtree_destroy(&tree);
#if defined(BENCH_LOAD)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	/* NOTE(review): bench call dropped by extraction — restore upstream. */
	mtree_destroy(&tree);
#if defined(BENCH_FORK)
	/* NOTE(review): section body dropped by extraction. */
#if defined(BENCH_MT_FOR_EACH)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	bench_mt_for_each(&tree);
	mtree_destroy(&tree);
#if defined(BENCH_MAS_FOR_EACH)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	bench_mas_for_each(&tree);
	mtree_destroy(&tree);
#if defined(BENCH_MAS_PREV)
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	bench_mas_prev(&tree);
	mtree_destroy(&tree);

	/* Functional tests proper, one fresh tree each. */
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_root_expand(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_iteration(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_mas_store_gfp(&tree);
	mtree_destroy(&tree);

	/* Test ranges (store and insert) */
	mt_init_flags(&tree, 0);
	check_ranges(&tree);
	mtree_destroy(&tree);

#if defined(CONFIG_64BIT)
	/* These tests have ranges outside of 4GB */
	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_alloc_range(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_alloc_rev_range(&tree);
	mtree_destroy(&tree);

	/* Basic insert/load sanity on an empty (non-alloc) tree. */
	mt_init_flags(&tree, 0);

	check_load(&tree, set[0], NULL);	/* See if 5015 -> NULL */

	check_insert(&tree, set[9], &tree);	/* Insert 0 */
	check_load(&tree, set[9], &tree);	/* See if 0 -> &tree */
	check_load(&tree, set[0], NULL);	/* See if 5015 -> NULL */

	check_insert(&tree, set[10], ptr);	/* Insert 5003 */
	check_load(&tree, set[9], &tree);	/* See if 0 -> &tree */
	check_load(&tree, set[11], NULL);	/* See if 5002 -> NULL */
	check_load(&tree, set[10], ptr);	/* See if 5003 -> ptr */

	/* Clear out the tree */
	mtree_destroy(&tree);

	/* Try to insert, insert a dup, and load back what was inserted. */
	mt_init_flags(&tree, 0);
	check_insert(&tree, set[0], &tree);	/* Insert 5015 */
	check_dup_insert(&tree, set[0], &tree);	/* Insert 5015 again */
	check_load(&tree, set[0], &tree);	/* See if 5015 -> &tree */

	/*
	 * Second set of tests try to load a value that doesn't exist, inserts
	 * a second value, then loads the value again
	 */
	check_load(&tree, set[1], NULL);	/* See if 5014 -> NULL */
	check_insert(&tree, set[1], ptr);	/* insert 5014 -> ptr */
	check_load(&tree, set[1], ptr);		/* See if 5014 -> ptr */
	check_load(&tree, set[0], &tree);	/* See if 5015 -> &tree */
	/*
	 * Tree currently contains:
	 * p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil)
	 */
	check_insert(&tree, set[6], ptr);	/* insert 1002 -> ptr */
	check_insert(&tree, set[7], &tree);	/* insert 1003 -> &tree */

	check_load(&tree, set[0], &tree);	/* See if 5015 -> &tree */
	check_load(&tree, set[1], ptr);		/* See if 5014 -> ptr */
	check_load(&tree, set[6], ptr);		/* See if 1002 -> ptr */
	check_load(&tree, set[7], &tree);	/* 1003 = &tree ? */

	/* Clear out tree */
	mtree_destroy(&tree);

	mt_init_flags(&tree, 0);
	/* Test inserting into a NULL hole. */
	check_insert(&tree, set[5], ptr);	/* insert 1001 -> ptr */
	check_insert(&tree, set[7], &tree);	/* insert 1003 -> &tree */
	check_insert(&tree, set[6], ptr);	/* insert 1002 -> ptr */
	check_load(&tree, set[5], ptr);		/* See if 1001 -> ptr */
	check_load(&tree, set[6], ptr);		/* See if 1002 -> ptr */
	check_load(&tree, set[7], &tree);	/* See if 1003 -> &tree */

	/* Clear out the tree */
	mtree_destroy(&tree);

	/* Fill until the root node must split, verifying after each insert. */
	mt_init_flags(&tree, 0);
	/*
	 * set[] = {5015, 5014, 5017, 25, 1000,
	 *	    1001, 1002, 1003, 1005, 0,
	 */

	check_insert(&tree, set[0], ptr);	/* 5015 */
	check_insert(&tree, set[1], &tree);	/* 5014 */
	check_insert(&tree, set[2], ptr);	/* 5017 */
	check_insert(&tree, set[3], &tree);	/* 25 */
	check_load(&tree, set[0], ptr);
	check_load(&tree, set[1], &tree);
	check_load(&tree, set[2], ptr);
	check_load(&tree, set[3], &tree);
	check_insert(&tree, set[4], ptr);	/* 1000 < Should split. */
	check_load(&tree, set[0], ptr);
	check_load(&tree, set[1], &tree);
	check_load(&tree, set[2], ptr);
	check_load(&tree, set[3], &tree);	/*25 */
	check_load(&tree, set[4], ptr);
	check_insert(&tree, set[5], &tree);	/* 1001 */
	check_load(&tree, set[0], ptr);
	check_load(&tree, set[1], &tree);
	check_load(&tree, set[2], ptr);
	check_load(&tree, set[3], &tree);
	check_load(&tree, set[4], ptr);
	check_load(&tree, set[5], &tree);
	check_insert(&tree, set[6], ptr);
	check_load(&tree, set[0], ptr);
	check_load(&tree, set[1], &tree);
	check_load(&tree, set[2], ptr);
	check_load(&tree, set[3], &tree);
	check_load(&tree, set[4], ptr);
	check_load(&tree, set[5], &tree);
	check_load(&tree, set[6], ptr);
	check_insert(&tree, set[7], &tree);
	check_load(&tree, set[0], ptr);
	check_insert(&tree, set[8], ptr);

	check_insert(&tree, set[9], &tree);

	check_load(&tree, set[0], ptr);
	check_load(&tree, set[1], &tree);
	check_load(&tree, set[2], ptr);
	check_load(&tree, set[3], &tree);
	check_load(&tree, set[4], ptr);
	check_load(&tree, set[5], &tree);
	check_load(&tree, set[6], ptr);
	check_load(&tree, set[9], &tree);
	mtree_destroy(&tree);

	/* Sequential stores, forward and reverse. */
	mt_init_flags(&tree, 0);
	check_seq(&tree, 16, false);
	mtree_destroy(&tree);

	mt_init_flags(&tree, 0);
	check_seq(&tree, 1000, true);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_rev_seq(&tree, 1000, true);
	mtree_destroy(&tree);

	/* These three manage their own tree lifetime internally. */
	check_lower_bound_split(&tree);
	check_upper_bound_split(&tree);
	check_mid_split(&tree);

	mt_init_flags(&tree, 0);
	check_next_entry(&tree);
	check_find_2(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_prev_entry(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_gap_combining(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_node_overwrite(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	next_prev_test(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_spanning_relatives(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_rev_find(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, 0);
	check_fuzzer(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	/* NOTE(review): check call dropped by extraction — restore upstream. */
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_bnode_min_spanning(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_empty_area_window(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_empty_area_fill(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	check_state_handling(&tree);
	mtree_destroy(&tree);

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	alloc_cyclic_testing(&tree);
	mtree_destroy(&tree);

	/* Report pass/run counters accumulated by MT_BUG_ON(). */
	pr_info("maple_tree: %u of %u tests passed\n",
			atomic_read(&maple_tree_tests_passed),
			atomic_read(&maple_tree_tests_run));
	if (atomic_read(&maple_tree_tests_run) ==
			atomic_read(&maple_tree_tests_passed))
	/* NOTE(review): return statements and closing brace dropped by
	 * extraction — restore from upstream. */
3941 static void __exit
maple_tree_harvest(void)
/* Run the test suite at module load; the (empty) harvest hook at unload. */
module_init(maple_tree_seed);
module_exit(maple_tree_harvest);
MODULE_AUTHOR("Liam R. Howlett <Liam.Howlett@Oracle.com>");
MODULE_DESCRIPTION("maple tree API test module");
MODULE_LICENSE("GPL");