// SPDX-License-Identifier: GPL-2.0-only
/*
 * multiorder.c: Multi-order radix tree entry testing
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <pthread.h>

#include "test.h"

static int item_insert_order(struct xarray *xa, unsigned long index,
			unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	struct item *item = item_create(index, order);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	if (!xas_error(&xas))
		return 0;

	free(item);
	return xas_error(&xas);
}

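/*
 * Insert entries of assorted orders at the indices below, then start a walk
 * at every index in [0, 255] and check that each entry returned covers the
 * expected index range, records the expected order, and sits at the expected
 * node shift.
 */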
void multiorder_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j, err;

#define NUM_ENTRIES 11
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	printv(1, "Multiorder iteration test\n");

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(xa, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		xas_set(&xas, j);
		xas_for_each(&xas, item, ULONG_MAX) {
			int height = order[i] / XA_CHUNK_SHIFT;
			int shift = height * XA_CHUNK_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;

			assert((xas.xa_index | mask) == (index[i] | mask));
			assert(xas.xa_node->shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(xa);
}

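/*
 * Same shape of test as above, but only a subset of the entries is marked
 * with XA_MARK_1, and the walks use xas_for_each_marked() to check that
 * marks are found on exactly the expected multi-order entries.
 */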
void multiorder_tagged_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j;

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	printv(1, "Multiorder tagged iteration test\n");

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(xa, index[i], order[i]));

, XA_MARK_1
));
94 for (i
= 0; i
< TAG_ENTRIES
; i
++)
95 xa_set_mark(xa
, tag_index
[i
], XA_MARK_1
);
	for (j = 0; j < 256; j++) {
		int k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
			unsigned long mask;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

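	/*
	 * Copy XA_MARK_1 onto XA_MARK_2 for every marked entry, then repeat
	 * the walk above using XA_MARK_2.
	 */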
	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
			XA_MARK_2) == TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

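	/*
	 * Copy XA_MARK_1 onto XA_MARK_0 as well; a walk on XA_MARK_0 from
	 * index 0 should then visit exactly the tagged indices, in order.
	 */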
	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
			XA_MARK_0) == TAG_ENTRIES);
	i = 0;
	xas_set(&xas, 0);
	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
		assert(xas.xa_index == tag_index[i]);
		i++;
	}
	assert(i == TAG_ENTRIES);

	item_kill_tree(xa);
}

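/*
 * Race test: one thread repeatedly inserts and deletes a multi-order entry
 * at index 0 while the remaining threads iterate over the array under the
 * RCU read lock, sanity-checking every item they find.
 */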
bool stop_iteration = false;

static void *creator_func(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
	struct radix_tree_root *tree = ptr;
	int i;

	for (i = 0; i < 10000; i++) {
		item_insert_order(tree, 0, order);
		item_delete_rcu(tree, 0);
	}

	stop_iteration = true;
	return NULL;
}

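/* Walk the array under the RCU read lock until the creator thread is done. */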
static void *iterator_func(void *ptr)
{
	XA_STATE(xas, ptr, 0);
	struct item *item;

	while (!stop_iteration) {
		rcu_read_lock();
		xas_for_each(&xas, item, ULONG_MAX) {
			if (xas_retry(&xas, item))
				continue;

			item_sanity(item, xas.xa_index);
		}
		rcu_read_unlock();
	}

	return NULL;
}

static void multiorder_iteration_race(struct xarray *xa)
{
	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	pthread_t worker_thread[num_threads];
	int i;

	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
	for (i = 1; i < num_threads; i++)
		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);

	for (i = 0; i < num_threads; i++)
		pthread_join(worker_thread[i], NULL);

	item_kill_tree(xa);
}

static DEFINE_XARRAY(array);

void multiorder_checks(void)
{
	multiorder_iteration(&array);
	multiorder_tagged_iteration(&array);
	multiorder_iteration_race(&array);

	radix_tree_cpu_dead(0);
}

int __weak main(void)
{
	rcu_register_thread();
	radix_tree_init();
	multiorder_checks();
	rcu_unregister_thread();
	return 0;
}