/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>		/* in_interrupt() */

/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;

/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)

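/*
 * Worked example (illustrative, not from the original file; assumes a
 * 64-bit kernel with RADIX_TREE_MAP_SHIFT == 6): RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11, so RADIX_TREE_PRELOAD_SIZE comes to
 * 11 * 2 - 1 == 21 nodes, i.e. two full root-to-leaf branches minus the
 * shared root.
 */
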
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->private_data points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)

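/*
 * Illustrative sketch (not part of the original file; assumes
 * RADIX_TREE_INTERNAL_NODE is a low tag bit, which holds for kernels of
 * this vintage): radix_tree_node allocations are at least word-aligned,
 * so the bottom bits of a valid pointer are free to carry type
 * information:
 *
 *	struct radix_tree_node *n = ...;	// low bits clear
 *	void *entry = node_to_entry(n);		// low tag bit now set
 *	BUG_ON(!radix_tree_is_internal_node(entry));
 *	BUG_ON(entry_to_node(entry) != n);
 */
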
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
						 void **slot)
{
	return slot - parent->slots;
}

static unsigned int radix_tree_descend(struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		unsigned long siboff = get_slot_offset(parent, entry);
		if (siboff < RADIX_TREE_MAP_SIZE) {
			offset = siboff;
			entry = rcu_dereference_raw(parent->slots[offset]);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= __GFP_BITS_MASK;
}

static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}

static inline unsigned root_tags_get(struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(const unsigned long *addr,
			 unsigned long size, unsigned long offset)
{
	if (!__builtin_constant_p(size))
		return find_next_bit(addr, size, offset);

	if (offset < size) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < size) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return size;
}

#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d parent %p\n",
		node, node->offset,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->parent);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n",
					entry, i,
					*(void **)entry_to_node(entry),
					first, last);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %ld-%ld\n",
					entry, i, first, last);
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> __GFP_BITS_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;
			ret->private_data = NULL;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);
	int i;

	/*
	 * must only free zeroed nodes into the slab. radix_tree_shrink
	 * can leave us with a non-NULL entry in the first slot, so clear
	 * that here to make sure.
	 */
	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
		tag_clear(node, i, 0);

	node->slots[0] = NULL;
	node->count = 0;

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask, int nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->private_data = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);

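/*
 * Illustrative usage sketch (not part of the original file; the lock and
 * function names are hypothetical).  The intended pattern is to reserve
 * nodes while sleeping is still allowed, then insert under the caller's
 * own lock:
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	static int example_insert(struct radix_tree_root *root,
 *				  unsigned long index, void *item)
 *	{
 *		int err = radix_tree_preload(GFP_KERNEL);
 *
 *		if (err)
 *			return err;
 *		spin_lock(&example_lock);
 *		err = radix_tree_insert(root, index, item);
 *		spin_unlock(&example_lock);
 *		radix_tree_preload_end();	// re-enables preemption
 *		return err;
 *	}
 */
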
/*
 * The same as the function above, except we don't guarantee preloading
 * happens.  We do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

/*
 * The same as function above, but preload number of nodes required to insert
 * (1 << order) continuous naturally-aligned elements.
 */
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
	unsigned long nr_subtrees;
	int nr_nodes, subtree_height;

	/* Preloading doesn't help anything with this gfp mask, skip it */
	if (!gfpflags_allow_blocking(gfp_mask)) {
		preempt_disable();
		return 0;
	}

	/*
	 * Calculate number and height of fully populated subtrees it takes to
	 * store (1 << order) elements.
	 */
	nr_subtrees = 1 << order;
	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
			subtree_height++)
		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;

	/*
	 * The worst case is zero height tree with a single item at index 0 and
	 * then inserting items starting at ULONG_MAX - (1 << order).
	 *
	 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
	 * 0-index item.
	 */
	nr_nodes = RADIX_TREE_MAX_PATH;

	/* Plus branch to fully populated subtrees. */
	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;

	/* Root node is shared. */
	nr_nodes--;

	/* Plus nodes required to build subtrees. */
	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];

	return __radix_tree_preload(gfp_mask, nr_nodes);
}

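/*
 * Worked example (illustrative, not from the original file; assumes
 * RADIX_TREE_MAP_SHIFT == 6 on a 64-bit kernel): for order == 9,
 * nr_subtrees starts at 512; one loop iteration leaves nr_subtrees == 8
 * with subtree_height == 1, and with height_to_maxnodes[1] == 1 the
 * total is 11 + 10 - 1 + 8 == 28 preloaded nodes.
 */
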
/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

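/*
 * Example (illustrative, not from the original file; assumes
 * RADIX_TREE_MAP_SHIFT == 6, i.e. 64 slots per node): shift_maxindex(0)
 * is 63 for a single leaf node, and shift_maxindex(6) is
 * (64 << 6) - 1 == 4095 for a two-level tree.
 */
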
static unsigned radix_tree_load_root(struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot)
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(root);

		if (!node)
			return -ENOMEM;

		/* Propagate the aggregated tag info into the new root */
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
			if (root_tag_get(root, tag))
				tag_set(node, tag, 0);
		}

		BUG_ON(shift > BITS_PER_LONG);
		node->shift = shift;
		node->offset = 0;
		node->count = 1;
		node->parent = NULL;
		if (radix_tree_is_internal_node(slot))
			entry_to_node(slot)->parent = node;
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void **slot = (void **)&root->rnode;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough.  */
	if (max > maxindex) {
		int error = radix_tree_extend(root, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = root->rnode;
		if (order == shift)
			shift += RADIX_TREE_MAP_SHIFT;
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(root);
			if (!child)
				return -ENOMEM;
			child->shift = shift;
			child->offset = offset;
			child->parent = node;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	/* Insert pointers to the canonical entry */
	if (order > shift) {
		unsigned i, n = 1 << (order - shift);
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
		child = node_to_entry(slot);
		for (i = 0; i < n; i++) {
			if (slot[i])
				return -EEXIST;
		}

		for (i = 1; i < n; i++) {
			rcu_assign_pointer(slot[i], child);
			node->count++;
		}
	}
#endif

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/**
 *	__radix_tree_insert    -    insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		key covers the 2^order indices around index
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;
	if (*slot != NULL)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		node->count++;
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);

/**
 *	__radix_tree_lookup	-	lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void **slot;

 restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock iff the slot is not
 *	modified by radix_tree_replace_slot, otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);

/**
 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

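/*
 * Illustrative usage (not part of the original file; names are
 * hypothetical).  Lookups may run under RCU while updates are serialised
 * by the caller:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		get_a_reference(item);	// caller enforces item lifetime
 *	rcu_read_unlock();
 */
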
/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree, from the root all the
 *	way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a not-present
 *	item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}

/**
 *	radix_tree_tag_clear - clear a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  If this causes
 *	the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
 *	Returns the address of the tagged item on success, else NULL.  ie:
 *	has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;
	if (node == NULL)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!node)
			return 0;
		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

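/*
 * Illustrative tag usage (not part of the original file; the tree name
 * is hypothetical).  Tags are small indices below RADIX_TREE_MAX_TAGS,
 * used e.g. by the page cache for dirty/writeback tracking:
 *
 *	radix_tree_tag_set(&my_tree, index, 0);
 *	if (radix_tree_tag_get(&my_tree, index, 0))
 *		radix_tree_tag_clear(&my_tree, index, 0);
 */
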
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		__set_iter_shift(iter, 0);
		return (void **)&root->rnode;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(
						node->tags[tag],
						RADIX_TREE_MAP_SIZE,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = node->slots[offset];
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if ((child == NULL) || (child == RADIX_TREE_RETRY))
			goto restart;
	} while (radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	__set_iter_shift(iter, node->shift);

	/* Construct iter->tags bit-mask from node->tags[tag] array */
	if (flags & RADIX_TREE_ITER_TAGGED) {
		unsigned tag_long, tag_bit;

		tag_long = offset / BITS_PER_LONG;
		tag_bit  = offset % BITS_PER_LONG;
		iter->tags = node->tags[tag][tag_long] >> tag_bit;
		/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
		if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
			/* Pick tags from next element */
			if (tag_bit)
				iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
			/* Clip chunk size, here only BITS_PER_LONG tags */
			iter->next_index = index + BITS_PER_LONG;
		}
	}

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);

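/*
 * Illustrative iteration sketch (not part of the original file; the tree
 * name is hypothetical).  radix_tree_next_chunk() is normally reached
 * via the iterator macros rather than called directly:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *entry = radix_tree_deref_slot(slot);
 *
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		pr_info("index %lu -> %p\n", iter.index, entry);
 *	}
 *	rcu_read_unlock();
 */
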
/**
 * radix_tree_range_tag_if_tagged - for each item in given range set given
 *				    tag if item has another tag set
 * @root:		radix tree root
 * @first_indexp:	pointer to a starting index of a range to scan
 * @last_index:		last index of a range to scan
 * @nr_to_tag:		maximum number of items to tag
 * @iftag:		tag index to test
 * @settag:		tag index to set if tested tag is set
 *
 * This function scans the range of the radix tree from first_index to
 * last_index (inclusive).  For each item in the range, if iftag is set,
 * the function also sets settag.  The function stops either after tagging
 * nr_to_tag items or after reaching last_index.
 *
 * The tags must be set from the leaf level only and propagated back up the
 * path to the root. We must do this so that we resolve the full path before
 * setting any tags on intermediate nodes. If we set tags as we descend, then
 * we can get to the leaf node and find that the index that has the iftag
 * set is outside the range we are scanning. This results in dangling tags and
 * can lead to problems with later tag operations (e.g. livelocks on lookups).
 *
 * The function returns the number of leaves where the tag was set and sets
 * *first_indexp to the first unscanned index.
 * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
 * be prepared to handle that.
 */
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int iftag, unsigned int settag)
{
	struct radix_tree_node *parent, *node, *child;
	unsigned long maxindex;
	unsigned long tagged = 0;
	unsigned long index = *first_indexp;

	radix_tree_load_root(root, &child, &maxindex);
	last_index = min(last_index, maxindex);
	if (index > last_index)
		return 0;
	if (!nr_to_tag)
		return 0;
	if (!root_tag_get(root, iftag)) {
		*first_indexp = last_index + 1;
		return 0;
	}
	if (!radix_tree_is_internal_node(child)) {
		*first_indexp = last_index + 1;
		root_tag_set(root, settag);
		return 1;
	}

	node = entry_to_node(child);

	for (;;) {
		unsigned offset = radix_tree_descend(node, &child, index);
		if (!child)
			goto next;
		if (!tag_get(node, iftag, offset))
			goto next;
		/* Sibling slots never have tags set on them */
		if (radix_tree_is_internal_node(child)) {
			node = entry_to_node(child);
			continue;
		}

		/* tag the leaf */
		tagged++;
		tag_set(node, settag, offset);

		/* walk back up the path tagging interior nodes */
		parent = node;
		for (;;) {
			offset = parent->offset;
			parent = parent->parent;
			if (!parent)
				break;
			/* stop if we find a node with the tag already set */
			if (tag_get(parent, settag, offset))
				break;
			tag_set(parent, settag, offset);
		}
 next:
		/* Go to next entry in node */
		index = ((index >> node->shift) + 1) << node->shift;
		/* Overflow can happen when last_index is ~0UL... */
		if (index > last_index || !index)
			break;
		offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
		while (offset == 0) {
			/*
			 * We've fully scanned this node. Go up. Because
			 * last_index is guaranteed to be in the tree, what
			 * we do below cannot wander astray.
			 */
			node = node->parent;
			offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
		}
		if (is_sibling_entry(node, node->slots[offset]))
			goto next;
		if (tagged >= nr_to_tag)
			break;
	}
	/*
	 * There is no need to set the root tag if nothing within the range
	 * from *first_indexp to last_index was tagged with settag.
	 */
	if (tagged > 0)
		root_tag_set(root, settag);
	*first_indexp = index;

	return tagged;
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);

/**
 *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	them at *@results and returns the number of items which were placed at
 *	*@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 *	rcu_read_lock. In this case, rather than the returned results being
 *	an atomic snapshot of the tree at a single point in time, the
 *	semantics of an RCU protected gang lookup are as though multiple
 *	radix_tree_lookups have been issued in individual locks, and results
 *	stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);

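/*
 * Illustrative usage (not part of the original file; the names and batch
 * size are hypothetical).  Callers that resume a scan typically derive
 * the next start index from the items themselves, as the page cache does
 * with page->index:
 *
 *	void *batch[16];
 *	unsigned int n;
 *
 *	n = radix_tree_gang_lookup(&my_tree, batch, first_index, 16);
 *	// batch[0..n-1] now hold up to 16 present items
 */
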
/**
 *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@indices:	where their indices should be placed (but usually NULL)
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	their slots at *@results and returns the number of items which were
 *	placed at *@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
 *	be dereferenced with radix_tree_deref_slot, and if using only RCU
 *	protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = slot;
		if (indices)
			indices[ret] = iter.index;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);

/**
 *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *				     based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the items at *@results and
 *	returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

/**
 *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					  radix tree based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the slots at *@results and
 *	returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);

#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
#include <linux/sched.h> /* for cond_resched() */

struct locate_info {
	unsigned long found_index;
	bool stop;
};

/*
 * This linear search is at present only useful to shmem_unuse_inode().
 */
static unsigned long __locate(struct radix_tree_node *slot, void *item,
			      unsigned long index, struct locate_info *info)
{
	unsigned long i;

	do {
		unsigned int shift = slot->shift;

		for (i = (index >> shift) & RADIX_TREE_MAP_MASK;
		     i < RADIX_TREE_MAP_SIZE;
		     i++, index += (1UL << shift)) {
			struct radix_tree_node *node =
					rcu_dereference_raw(slot->slots[i]);
			if (node == RADIX_TREE_RETRY)
				goto out;
			if (!radix_tree_is_internal_node(node)) {
				if (node == item) {
					info->found_index = index;
					info->stop = true;
					goto out;
				}
				continue;
			}
			node = entry_to_node(node);
			if (is_sibling_entry(slot, node))
				continue;
			slot = node;
			break;
		}
	} while (i < RADIX_TREE_MAP_SIZE);

out:
	if ((index == 0) && (i == RADIX_TREE_MAP_SIZE))
		info->stop = true;
	return index;
}

/**
 *	radix_tree_locate_item - search through radix tree for item
 *	@root:		radix tree root
 *	@item:		item to be found
 *
 *	Returns index where item was found, or -1 if not found.
 *	Caller must hold no lock (since this time-consuming function needs
 *	to be preemptible), and must check afterwards if item is still there.
 */
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	struct radix_tree_node *node;
	unsigned long max_index;
	unsigned long cur_index = 0;
	struct locate_info info = {
		.found_index = -1,
		.stop = false,
	};

	do {
		rcu_read_lock();
		node = rcu_dereference_raw(root->rnode);
		if (!radix_tree_is_internal_node(node)) {
			rcu_read_unlock();
			if (node == item)
				info.found_index = 0;
			break;
		}

		node = entry_to_node(node);

		max_index = node_maxindex(node);
		if (cur_index > max_index) {
			rcu_read_unlock();
			break;
		}

		cur_index = __locate(node, item, cur_index, &info);
		rcu_read_unlock();
		cond_resched();
	} while (!info.stop && cur_index <= max_index);

	return info.found_index;
}
#else
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
	return -1;
}
#endif /* CONFIG_SHMEM && CONFIG_SWAP */

/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root		radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = root->rnode;
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, or the child is a
		 * multiorder entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = node->slots[0];
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		if (!radix_tree_is_internal_node(child))
			node->slots[0] = RADIX_TREE_RETRY;

		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

/**
 *	__radix_tree_delete_node    -    try to free node after clearing a slot
 *	@root:		radix tree root
 *	@node:		node containing @index
 *
 *	After clearing the slot at @index in @node from radix tree
 *	rooted at @root, call this function to attempt freeing the
 *	node and shrinking the tree.
 *
 *	Returns %true if @node was freed, %false otherwise.
 */
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == entry_to_node(root->rnode))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			root_tag_clear_all(root);
			root->rnode = NULL;
		}

		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

static inline void delete_sibling_entries(struct radix_tree_node *node,
					void *ptr, unsigned offset)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	int i;
	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		node->slots[offset + i] = NULL;
		node->count--;
	}
#endif
}

/**
 *	radix_tree_delete_item    -    delete an item from a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@item:		expected item
 *
 *	Remove @item at @index from the radix tree rooted at @root.
 *
 *	Returns the address of the deleted item, or NULL if it was not present
 *	or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node;
	unsigned int offset;
	void **slot;
	void *entry;
	int tag;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!entry)
		return NULL;

	if (item && entry != item)
		return NULL;

	if (!node) {
		root_tag_clear_all(root);
		root->rnode = NULL;
		return entry;
	}

	offset = get_slot_offset(node, slot);

	/* Clear all tags associated with the item to be deleted.  */
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
		node_tag_clear(root, node, tag, offset);

	delete_sibling_entries(node, node_to_entry(slot), offset);
	node->slots[offset] = NULL;
	node->count--;

	__radix_tree_delete_node(root, node);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);

/**
 *	radix_tree_delete    -    delete an item from a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Remove the item at @index from the radix tree rooted at @root.
 *
 *	Returns the address of the deleted item, or NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);

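/*
 * Illustrative usage (not part of the original file; names are
 * hypothetical).  radix_tree_delete() removes unconditionally, while
 * radix_tree_delete_item() only removes the slot if it still holds the
 * expected item, giving a "compare and delete" primitive:
 *
 *	spin_lock(&example_lock);
 *	old = radix_tree_delete_item(&my_tree, index, expected);
 *	spin_unlock(&example_lock);
 *	if (!old)
 *		...	// the entry changed or vanished under us
 */
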
struct radix_tree_node *radix_tree_replace_clear_tags(
			struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	__radix_tree_lookup(root, index, &node, &slot);

	if (node) {
		unsigned int tag, offset = get_slot_offset(node, slot);
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);
	} else {
		/* Clear root node tags */
		root->gfp_mask &= __GFP_BITS_MASK;
	}

	radix_tree_replace_slot(slot, entry);
	return node;
}

/**
 *	radix_tree_tagged - test whether any items in the tree are tagged
 *	@root:		radix tree root
 *	@tag:		tag to test
 */
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);

static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static __init unsigned long __maxindex(unsigned int height)
{
	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
	int shift = RADIX_TREE_INDEX_BITS - width;

	if (shift < 0)
		return ~0UL;
	if (shift >= BITS_PER_LONG)
		return 0UL;
	return ~0UL >> shift;
}

static __init void radix_tree_init_maxnodes(void)
{
	unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
		height_to_maxindex[i] = __maxindex(i);
	for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
		for (j = i; j > 0; j--)
			height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
	}
}

static int radix_tree_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		rtp = &per_cpu(radix_tree_preloads, cpu);
		while (rtp->nr) {
			node = rtp->nodes;
			rtp->nodes = node->private_data;
			kmem_cache_free(radix_tree_node_cachep, node);
			rtp->nr--;
		}
	}
	return NOTIFY_OK;
}

void __init radix_tree_init(void)
{
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	radix_tree_init_maxnodes();
	hotcpu_notifier(radix_tree_callback, 0);
}