/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>

/* Keep unconverted code working */
#define radix_tree_root		xarray
#define radix_tree_node		xa_node

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 10 - internal entry
 * x1 - value entry
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that storing a NULL entry in the tree is the same as deleting
 * the entry from the tree.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	2UL

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

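/*
 * Illustrative sketch of the low-bit encoding above (not part of this
 * header; the variable names are hypothetical).  An entry read from a slot
 * falls into one of three cases:
 *
 *	void *entry = radix_tree_deref_slot(slot);
 *
 *	if (!entry)
 *		...	// no entry stored at this index
 *	else if (radix_tree_is_internal_node(entry))
 *		...	// internal entry, e.g. a moved slot: restart lookup
 *	else
 *		...	// bottom bits 00: a user data pointer
 */
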
/*** radix-tree API starts here ***/

#define RADIX_TREE_MAP_SHIFT	XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_MAX_TAGS	XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS	XA_MARK_LONGS

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR	((__force gfp_t)4)
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT)

#define RADIX_TREE_INIT(name, mask)	XARRAY_INIT(name, mask)

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(name, mask)

#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)

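/*
 * Usage sketch (illustrative, not part of this header; the names "my_tree"
 * and "obj_tree" are hypothetical).  A tree can be declared statically with
 * RADIX_TREE() or initialized at run time with INIT_RADIX_TREE(); the mask
 * is the gfp mask used for node allocations.
 *
 *	static RADIX_TREE(my_tree, GFP_KERNEL);
 *
 *	struct radix_tree_root obj_tree;
 *	INIT_RADIX_TREE(&obj_tree, GFP_ATOMIC);
 */
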
static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->xa_head == NULL;
}

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is
 * a subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
};

/*
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 7 functions can be called locklessly, using RCU.  The caller
 * must ensure calls to these functions are made within rcu_read_lock()
 * regions.  Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix
 * tree.)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied
 * upon if only the RCU read lock is held.  Functions to set/clear tags and
 * to delete nodes running concurrently with it may affect its result, such
 * that two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged can be called without locking or RCU.
 */

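/*
 * Minimal RCU lookup sketch (illustrative only; "obj_tree" and "struct obj"
 * are hypothetical).  The lookup runs locklessly inside an RCU read-side
 * critical section; per the rules above, the item itself must be RCU-freed
 * by its owner for this to be safe.
 *
 *	struct obj *obj;
 *
 *	rcu_read_lock();
 *	obj = radix_tree_lookup(&obj_tree, index);
 *	if (obj)
 *		...	// use obj; it cannot be freed until we leave
 *			// the read-side critical section
 *	rcu_read_unlock();
 */
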
/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least
 * read locked across slot lookup and dereference.  Not required if write
 * lock is held (i.e. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer
 * if only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
						    spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}

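/*
 * Sketch of the slot lookup/retry pattern (illustrative; "tree" is
 * hypothetical).  Under RCU-only protection a slot may be moved while we
 * look at it, in which case radix_tree_deref_retry() tells us to restart
 * the lookup.
 *
 *	void __rcu **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&tree, index);
 *	item = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(item))
 *		goto repeat;
 *	...	// use item, subject to the usual RCU lifetime rules
 *	rcu_read_unlock();
 */
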
/**
 * radix_tree_exception - did radix_tree_deref_slot return either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

int radix_tree_insert(struct radix_tree_root *, unsigned long index,
			void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
			const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);

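/*
 * Tag usage sketch (illustrative; "tree" and MY_TAG are hypothetical, and a
 * real tag index must be below RADIX_TREE_MAX_TAGS).  Tags let callers mark
 * individual entries and later find only the marked ones via the *_tag
 * lookups or radix_tree_for_each_tagged().
 *
 *	#define MY_TAG 0
 *
 *	radix_tree_tag_set(&tree, index, MY_TAG);
 *	if (radix_tree_tagged(&tree, MY_TAG))
 *		...	// at least one entry carries MY_TAG
 *	radix_tree_tag_clear(&tree, index, MY_TAG);
 */
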
static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}

void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);

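/*
 * Preload usage sketch (illustrative; "tree", "tree_lock" and "item" are
 * hypothetical).  radix_tree_preload() pre-allocates nodes and disables
 * preemption, so the actual insertion can run under a spinlock without
 * sleeping allocations; radix_tree_preload_end() re-enables preemption.
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&tree_lock);
 *	err = radix_tree_insert(&tree, index, item);
 *	spin_unlock(&tree_lock);
 *	radix_tree_preload_end();
 */
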
enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized.  radix_tree_next_chunk() will fill
	 * it in the case of a successful tagged chunk lookup.  If the lookup
	 * was unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iterating (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + slots;
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot:	pointer to current slot
 * @iter:	iterator state
 * Returns:	New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter);

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))

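/*
 * Iteration sketch (illustrative; "tree" and "struct obj" are hypothetical).
 * The loop visits every non-empty slot; each entry must still be
 * dereferenced through the slot pointer, and RCU-only iteration needs the
 * retry dance shown here.
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
 *		struct obj *obj = radix_tree_deref_slot(slot);
 *
 *		if (radix_tree_deref_retry(obj)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		...	// use obj at index iter.index
 *	}
 *	rcu_read_unlock();
 */
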
/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))

#endif /* _LINUX_RADIX_TREE_H */