1 // SPDX-License-Identifier: GPL-2.0+
3 * Maple Tree implementation
4 * Copyright (c) 2018-2022 Oracle Corporation
5 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
6 * Matthew Wilcox <willy@infradead.org>
7 * Copyright (c) 2023 ByteDance
8 * Author: Peng Zhang <zhangpeng.00@bytedance.com>
12 * DOC: Interesting implementation details of the Maple Tree
14 * Each node type has a number of slots for entries and a number of slots for
15 * pivots. In the case of dense nodes, the pivots are implied by the position
16 * and are simply the slot index + the minimum of the node.
18 * In regular B-Tree terms, pivots are called keys. The term pivot is used to
19 * indicate that the tree is specifying ranges. Pivots may appear in the
20 * subtree with an entry attached to the value whereas keys are unique to a
21 * specific position of a B-tree. Pivot values are inclusive of the slot with
25 * The following illustrates the layout of a range64 nodes slots and pivots.
28 * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
30 * │ │ │ │ │ │ │ │ └─ Implied maximum
31 * │ │ │ │ │ │ │ └─ Pivot 14
32 * │ │ │ │ │ │ └─ Pivot 13
33 * │ │ │ │ │ └─ Pivot 12
41 * Internal (non-leaf) nodes contain pointers to other nodes.
42 * Leaf nodes contain entries.
44 * The location of interest is often referred to as an offset. All offsets have
45 * a slot, but the last offset has an implied pivot from the node above (or
46 * UINT_MAX for the root node.
48 * Ranges complicate certain write activities. When modifying any of
49 * the B-tree variants, it is known that one entry will either be added or
50 * deleted. When modifying the Maple Tree, one store operation may overwrite
51 * the entire data set, or one half of the tree, or the middle half of the tree.
56 #include <linux/maple_tree.h>
57 #include <linux/xarray.h>
58 #include <linux/types.h>
59 #include <linux/export.h>
60 #include <linux/slab.h>
61 #include <linux/limits.h>
62 #include <asm/barrier.h>
64 #define CREATE_TRACE_POINTS
65 #include <trace/events/maple_tree.h>
68 * Kernel pointer hashing renders much of the maple tree dump useless as tagged
69 * pointers get hashed to arbitrary values.
71 * If CONFIG_DEBUG_VM_MAPLE_TREE is set we are in a debug mode where it is
72 * permissible to bypass this. Otherwise remain cautious and retain the hashing.
74 * Userland doesn't know about %px so also use %p there.
76 #if defined(__KERNEL__) && defined(CONFIG_DEBUG_VM_MAPLE_TREE)
82 #define MA_ROOT_PARENT 1
86 * * MA_STATE_BULK - Bulk insert mode
87 * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
88 * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
90 #define MA_STATE_BULK 1
91 #define MA_STATE_REBALANCE 2
92 #define MA_STATE_PREALLOC 4
94 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
95 #define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
96 #define ma_mnode_ptr(x) ((struct maple_node *)(x))
97 #define ma_enode_ptr(x) ((struct maple_enode *)(x))
98 static struct kmem_cache
*maple_node_cache
;
100 #ifdef CONFIG_DEBUG_MAPLE_TREE
101 static const unsigned long mt_max
[] = {
102 [maple_dense
] = MAPLE_NODE_SLOTS
,
103 [maple_leaf_64
] = ULONG_MAX
,
104 [maple_range_64
] = ULONG_MAX
,
105 [maple_arange_64
] = ULONG_MAX
,
107 #define mt_node_max(x) mt_max[mte_node_type(x)]
110 static const unsigned char mt_slots
[] = {
111 [maple_dense
] = MAPLE_NODE_SLOTS
,
112 [maple_leaf_64
] = MAPLE_RANGE64_SLOTS
,
113 [maple_range_64
] = MAPLE_RANGE64_SLOTS
,
114 [maple_arange_64
] = MAPLE_ARANGE64_SLOTS
,
116 #define mt_slot_count(x) mt_slots[mte_node_type(x)]
118 static const unsigned char mt_pivots
[] = {
120 [maple_leaf_64
] = MAPLE_RANGE64_SLOTS
- 1,
121 [maple_range_64
] = MAPLE_RANGE64_SLOTS
- 1,
122 [maple_arange_64
] = MAPLE_ARANGE64_SLOTS
- 1,
124 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
126 static const unsigned char mt_min_slots
[] = {
127 [maple_dense
] = MAPLE_NODE_SLOTS
/ 2,
128 [maple_leaf_64
] = (MAPLE_RANGE64_SLOTS
/ 2) - 2,
129 [maple_range_64
] = (MAPLE_RANGE64_SLOTS
/ 2) - 2,
130 [maple_arange_64
] = (MAPLE_ARANGE64_SLOTS
/ 2) - 1,
132 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
134 #define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
135 #define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
137 struct maple_big_node
{
138 unsigned long pivot
[MAPLE_BIG_NODE_SLOTS
- 1];
140 struct maple_enode
*slot
[MAPLE_BIG_NODE_SLOTS
];
142 unsigned long padding
[MAPLE_BIG_NODE_GAPS
];
143 unsigned long gap
[MAPLE_BIG_NODE_GAPS
];
147 enum maple_type type
;
151 * The maple_subtree_state is used to build a tree to replace a segment of an
152 * existing tree in a more atomic way. Any walkers of the older tree will hit a
153 * dead node and restart on updates.
155 struct maple_subtree_state
{
156 struct ma_state
*orig_l
; /* Original left side of subtree */
157 struct ma_state
*orig_r
; /* Original right side of subtree */
158 struct ma_state
*l
; /* New left side of subtree */
159 struct ma_state
*m
; /* New middle of subtree (rare) */
160 struct ma_state
*r
; /* New right side of subtree */
161 struct ma_topiary
*free
; /* nodes to be freed */
162 struct ma_topiary
*destroy
; /* Nodes to be destroyed (walked and freed) */
163 struct maple_big_node
*bn
;
166 #ifdef CONFIG_KASAN_STACK
167 /* Prevent mas_wr_bnode() from exceeding the stack frame limit */
168 #define noinline_for_kasan noinline_for_stack
170 #define noinline_for_kasan inline
174 static inline struct maple_node
*mt_alloc_one(gfp_t gfp
)
176 return kmem_cache_alloc(maple_node_cache
, gfp
);
179 static inline int mt_alloc_bulk(gfp_t gfp
, size_t size
, void **nodes
)
181 return kmem_cache_alloc_bulk(maple_node_cache
, gfp
, size
, nodes
);
184 static inline void mt_free_one(struct maple_node
*node
)
186 kmem_cache_free(maple_node_cache
, node
);
189 static inline void mt_free_bulk(size_t size
, void __rcu
**nodes
)
191 kmem_cache_free_bulk(maple_node_cache
, size
, (void **)nodes
);
194 static void mt_free_rcu(struct rcu_head
*head
)
196 struct maple_node
*node
= container_of(head
, struct maple_node
, rcu
);
198 kmem_cache_free(maple_node_cache
, node
);
202 * ma_free_rcu() - Use rcu callback to free a maple node
203 * @node: The node to free
205 * The maple tree uses the parent pointer to indicate this node is no longer in
206 * use and will be freed.
208 static void ma_free_rcu(struct maple_node
*node
)
210 WARN_ON(node
->parent
!= ma_parent_ptr(node
));
211 call_rcu(&node
->rcu
, mt_free_rcu
);
214 static void mas_set_height(struct ma_state
*mas
)
216 unsigned int new_flags
= mas
->tree
->ma_flags
;
218 new_flags
&= ~MT_FLAGS_HEIGHT_MASK
;
219 MAS_BUG_ON(mas
, mas
->depth
> MAPLE_HEIGHT_MAX
);
220 new_flags
|= mas
->depth
<< MT_FLAGS_HEIGHT_OFFSET
;
221 mas
->tree
->ma_flags
= new_flags
;
224 static unsigned int mas_mt_height(struct ma_state
*mas
)
226 return mt_height(mas
->tree
);
229 static inline unsigned int mt_attr(struct maple_tree
*mt
)
231 return mt
->ma_flags
& ~MT_FLAGS_HEIGHT_MASK
;
234 static __always_inline
enum maple_type
mte_node_type(
235 const struct maple_enode
*entry
)
237 return ((unsigned long)entry
>> MAPLE_NODE_TYPE_SHIFT
) &
238 MAPLE_NODE_TYPE_MASK
;
241 static __always_inline
bool ma_is_dense(const enum maple_type type
)
243 return type
< maple_leaf_64
;
246 static __always_inline
bool ma_is_leaf(const enum maple_type type
)
248 return type
< maple_range_64
;
251 static __always_inline
bool mte_is_leaf(const struct maple_enode
*entry
)
253 return ma_is_leaf(mte_node_type(entry
));
257 * We also reserve values with the bottom two bits set to '10' which are
260 static __always_inline
bool mt_is_reserved(const void *entry
)
262 return ((unsigned long)entry
< MAPLE_RESERVED_RANGE
) &&
263 xa_is_internal(entry
);
266 static __always_inline
void mas_set_err(struct ma_state
*mas
, long err
)
268 mas
->node
= MA_ERROR(err
);
269 mas
->status
= ma_error
;
272 static __always_inline
bool mas_is_ptr(const struct ma_state
*mas
)
274 return mas
->status
== ma_root
;
277 static __always_inline
bool mas_is_start(const struct ma_state
*mas
)
279 return mas
->status
== ma_start
;
282 static __always_inline
bool mas_is_none(const struct ma_state
*mas
)
284 return mas
->status
== ma_none
;
287 static __always_inline
bool mas_is_paused(const struct ma_state
*mas
)
289 return mas
->status
== ma_pause
;
292 static __always_inline
bool mas_is_overflow(struct ma_state
*mas
)
294 return mas
->status
== ma_overflow
;
297 static inline bool mas_is_underflow(struct ma_state
*mas
)
299 return mas
->status
== ma_underflow
;
302 static __always_inline
struct maple_node
*mte_to_node(
303 const struct maple_enode
*entry
)
305 return (struct maple_node
*)((unsigned long)entry
& ~MAPLE_NODE_MASK
);
309 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
310 * @entry: The maple encoded node
312 * Return: a maple topiary pointer
314 static inline struct maple_topiary
*mte_to_mat(const struct maple_enode
*entry
)
316 return (struct maple_topiary
*)
317 ((unsigned long)entry
& ~MAPLE_NODE_MASK
);
321 * mas_mn() - Get the maple state node.
322 * @mas: The maple state
324 * Return: the maple node (not encoded - bare pointer).
326 static inline struct maple_node
*mas_mn(const struct ma_state
*mas
)
328 return mte_to_node(mas
->node
);
332 * mte_set_node_dead() - Set a maple encoded node as dead.
333 * @mn: The maple encoded node.
335 static inline void mte_set_node_dead(struct maple_enode
*mn
)
337 mte_to_node(mn
)->parent
= ma_parent_ptr(mte_to_node(mn
));
338 smp_wmb(); /* Needed for RCU */
341 /* Bit 1 indicates the root is a node */
342 #define MAPLE_ROOT_NODE 0x02
343 /* maple_type stored bit 3-6 */
344 #define MAPLE_ENODE_TYPE_SHIFT 0x03
345 /* Bit 2 means a NULL somewhere below */
346 #define MAPLE_ENODE_NULL 0x04
348 static inline struct maple_enode
*mt_mk_node(const struct maple_node
*node
,
349 enum maple_type type
)
351 return (void *)((unsigned long)node
|
352 (type
<< MAPLE_ENODE_TYPE_SHIFT
) | MAPLE_ENODE_NULL
);
355 static inline void *mte_mk_root(const struct maple_enode
*node
)
357 return (void *)((unsigned long)node
| MAPLE_ROOT_NODE
);
360 static inline void *mte_safe_root(const struct maple_enode
*node
)
362 return (void *)((unsigned long)node
& ~MAPLE_ROOT_NODE
);
365 static inline void __maybe_unused
*mte_set_full(const struct maple_enode
*node
)
367 return (void *)((unsigned long)node
& ~MAPLE_ENODE_NULL
);
370 static inline void __maybe_unused
*mte_clear_full(const struct maple_enode
*node
)
372 return (void *)((unsigned long)node
| MAPLE_ENODE_NULL
);
375 static inline bool __maybe_unused
mte_has_null(const struct maple_enode
*node
)
377 return (unsigned long)node
& MAPLE_ENODE_NULL
;
380 static __always_inline
bool ma_is_root(struct maple_node
*node
)
382 return ((unsigned long)node
->parent
& MA_ROOT_PARENT
);
385 static __always_inline
bool mte_is_root(const struct maple_enode
*node
)
387 return ma_is_root(mte_to_node(node
));
390 static inline bool mas_is_root_limits(const struct ma_state
*mas
)
392 return !mas
->min
&& mas
->max
== ULONG_MAX
;
395 static __always_inline
bool mt_is_alloc(struct maple_tree
*mt
)
397 return (mt
->ma_flags
& MT_FLAGS_ALLOC_RANGE
);
402 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
403 * When storing a 32 or 64 bit values, the offset can fit into 5 bits. The 16
404 * bit values need an extra bit to store the offset. This extra bit comes from
405 * a reuse of the last bit in the node type. This is possible by using bit 1 to
406 * indicate if bit 2 is part of the type or the slot.
410 * 0x?00 = 16 bit nodes
411 * 0x010 = 32 bit nodes
412 * 0x110 = 64 bit nodes
414 * Slot size and alignment
416 * 0b?00 : 16 bit values, type in 0-1, slot in 2-7
417 * 0b010 : 32 bit values, type in 0-2, slot in 3-7
418 * 0b110 : 64 bit values, type in 0-2, slot in 3-7
421 #define MAPLE_PARENT_ROOT 0x01
423 #define MAPLE_PARENT_SLOT_SHIFT 0x03
424 #define MAPLE_PARENT_SLOT_MASK 0xF8
426 #define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
427 #define MAPLE_PARENT_16B_SLOT_MASK 0xFC
429 #define MAPLE_PARENT_RANGE64 0x06
430 #define MAPLE_PARENT_RANGE32 0x04
431 #define MAPLE_PARENT_NOT_RANGE16 0x02
434 * mte_parent_shift() - Get the parent shift for the slot storage.
435 * @parent: The parent pointer cast as an unsigned long
436 * Return: The shift into that pointer to the star to of the slot
438 static inline unsigned long mte_parent_shift(unsigned long parent
)
440 /* Note bit 1 == 0 means 16B */
441 if (likely(parent
& MAPLE_PARENT_NOT_RANGE16
))
442 return MAPLE_PARENT_SLOT_SHIFT
;
444 return MAPLE_PARENT_16B_SLOT_SHIFT
;
448 * mte_parent_slot_mask() - Get the slot mask for the parent.
449 * @parent: The parent pointer cast as an unsigned long.
450 * Return: The slot mask for that parent.
452 static inline unsigned long mte_parent_slot_mask(unsigned long parent
)
454 /* Note bit 1 == 0 means 16B */
455 if (likely(parent
& MAPLE_PARENT_NOT_RANGE16
))
456 return MAPLE_PARENT_SLOT_MASK
;
458 return MAPLE_PARENT_16B_SLOT_MASK
;
462 * mas_parent_type() - Return the maple_type of the parent from the stored
464 * @mas: The maple state
465 * @enode: The maple_enode to extract the parent's enum
466 * Return: The node->parent maple_type
469 enum maple_type
mas_parent_type(struct ma_state
*mas
, struct maple_enode
*enode
)
471 unsigned long p_type
;
473 p_type
= (unsigned long)mte_to_node(enode
)->parent
;
474 if (WARN_ON(p_type
& MAPLE_PARENT_ROOT
))
477 p_type
&= MAPLE_NODE_MASK
;
478 p_type
&= ~mte_parent_slot_mask(p_type
);
480 case MAPLE_PARENT_RANGE64
: /* or MAPLE_PARENT_ARANGE64 */
481 if (mt_is_alloc(mas
->tree
))
482 return maple_arange_64
;
483 return maple_range_64
;
490 * mas_set_parent() - Set the parent node and encode the slot
491 * @mas: The maple state
492 * @enode: The encoded maple node.
493 * @parent: The encoded maple node that is the parent of @enode.
494 * @slot: The slot that @enode resides in @parent.
496 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
500 void mas_set_parent(struct ma_state
*mas
, struct maple_enode
*enode
,
501 const struct maple_enode
*parent
, unsigned char slot
)
503 unsigned long val
= (unsigned long)parent
;
506 enum maple_type p_type
= mte_node_type(parent
);
508 MAS_BUG_ON(mas
, p_type
== maple_dense
);
509 MAS_BUG_ON(mas
, p_type
== maple_leaf_64
);
513 case maple_arange_64
:
514 shift
= MAPLE_PARENT_SLOT_SHIFT
;
515 type
= MAPLE_PARENT_RANGE64
;
524 val
&= ~MAPLE_NODE_MASK
; /* Clear all node metadata in parent */
525 val
|= (slot
<< shift
) | type
;
526 mte_to_node(enode
)->parent
= ma_parent_ptr(val
);
530 * mte_parent_slot() - get the parent slot of @enode.
531 * @enode: The encoded maple node.
533 * Return: The slot in the parent node where @enode resides.
535 static __always_inline
536 unsigned int mte_parent_slot(const struct maple_enode
*enode
)
538 unsigned long val
= (unsigned long)mte_to_node(enode
)->parent
;
540 if (unlikely(val
& MA_ROOT_PARENT
))
544 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
545 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
547 return (val
& MAPLE_PARENT_16B_SLOT_MASK
) >> mte_parent_shift(val
);
551 * mte_parent() - Get the parent of @node.
552 * @enode: The encoded maple node.
554 * Return: The parent maple node.
556 static __always_inline
557 struct maple_node
*mte_parent(const struct maple_enode
*enode
)
559 return (void *)((unsigned long)
560 (mte_to_node(enode
)->parent
) & ~MAPLE_NODE_MASK
);
564 * ma_dead_node() - check if the @enode is dead.
565 * @enode: The encoded maple node
567 * Return: true if dead, false otherwise.
569 static __always_inline
bool ma_dead_node(const struct maple_node
*node
)
571 struct maple_node
*parent
;
573 /* Do not reorder reads from the node prior to the parent check */
575 parent
= (void *)((unsigned long) node
->parent
& ~MAPLE_NODE_MASK
);
576 return (parent
== node
);
580 * mte_dead_node() - check if the @enode is dead.
581 * @enode: The encoded maple node
583 * Return: true if dead, false otherwise.
585 static __always_inline
bool mte_dead_node(const struct maple_enode
*enode
)
587 struct maple_node
*parent
, *node
;
589 node
= mte_to_node(enode
);
590 /* Do not reorder reads from the node prior to the parent check */
592 parent
= mte_parent(enode
);
593 return (parent
== node
);
597 * mas_allocated() - Get the number of nodes allocated in a maple state.
598 * @mas: The maple state
600 * The ma_state alloc member is overloaded to hold a pointer to the first
601 * allocated node or to the number of requested nodes to allocate. If bit 0 is
602 * set, then the alloc contains the number of requested nodes. If there is an
603 * allocated node, then the total allocated nodes is in that node.
605 * Return: The total number of nodes allocated
607 static inline unsigned long mas_allocated(const struct ma_state
*mas
)
609 if (!mas
->alloc
|| ((unsigned long)mas
->alloc
& 0x1))
612 return mas
->alloc
->total
;
616 * mas_set_alloc_req() - Set the requested number of allocations.
617 * @mas: the maple state
618 * @count: the number of allocations.
620 * The requested number of allocations is either in the first allocated node,
621 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
622 * no allocated node. Set the request either in the node or do the necessary
623 * encoding to store in @mas->alloc directly.
625 static inline void mas_set_alloc_req(struct ma_state
*mas
, unsigned long count
)
627 if (!mas
->alloc
|| ((unsigned long)mas
->alloc
& 0x1)) {
631 mas
->alloc
= (struct maple_alloc
*)(((count
) << 1U) | 1U);
635 mas
->alloc
->request_count
= count
;
639 * mas_alloc_req() - get the requested number of allocations.
640 * @mas: The maple state
642 * The alloc count is either stored directly in @mas, or in
643 * @mas->alloc->request_count if there is at least one node allocated. Decode
644 * the request count if it's stored directly in @mas->alloc.
646 * Return: The allocation request count.
648 static inline unsigned int mas_alloc_req(const struct ma_state
*mas
)
650 if ((unsigned long)mas
->alloc
& 0x1)
651 return (unsigned long)(mas
->alloc
) >> 1;
653 return mas
->alloc
->request_count
;
658 * ma_pivots() - Get a pointer to the maple node pivots.
659 * @node: the maple node
660 * @type: the node type
662 * In the event of a dead node, this array may be %NULL
664 * Return: A pointer to the maple node pivots
666 static inline unsigned long *ma_pivots(struct maple_node
*node
,
667 enum maple_type type
)
670 case maple_arange_64
:
671 return node
->ma64
.pivot
;
674 return node
->mr64
.pivot
;
682 * ma_gaps() - Get a pointer to the maple node gaps.
683 * @node: the maple node
684 * @type: the node type
686 * Return: A pointer to the maple node gaps
688 static inline unsigned long *ma_gaps(struct maple_node
*node
,
689 enum maple_type type
)
692 case maple_arange_64
:
693 return node
->ma64
.gap
;
703 * mas_safe_pivot() - get the pivot at @piv or mas->max.
704 * @mas: The maple state
705 * @pivots: The pointer to the maple node pivots
706 * @piv: The pivot to fetch
707 * @type: The maple node type
709 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
712 static __always_inline
unsigned long
713 mas_safe_pivot(const struct ma_state
*mas
, unsigned long *pivots
,
714 unsigned char piv
, enum maple_type type
)
716 if (piv
>= mt_pivots
[type
])
723 * mas_safe_min() - Return the minimum for a given offset.
724 * @mas: The maple state
725 * @pivots: The pointer to the maple node pivots
726 * @offset: The offset into the pivot array
728 * Return: The minimum range value that is contained in @offset.
730 static inline unsigned long
731 mas_safe_min(struct ma_state
*mas
, unsigned long *pivots
, unsigned char offset
)
734 return pivots
[offset
- 1] + 1;
740 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
741 * @mn: The encoded maple node
742 * @piv: The pivot offset
743 * @val: The value of the pivot
745 static inline void mte_set_pivot(struct maple_enode
*mn
, unsigned char piv
,
748 struct maple_node
*node
= mte_to_node(mn
);
749 enum maple_type type
= mte_node_type(mn
);
751 BUG_ON(piv
>= mt_pivots
[type
]);
755 node
->mr64
.pivot
[piv
] = val
;
757 case maple_arange_64
:
758 node
->ma64
.pivot
[piv
] = val
;
767 * ma_slots() - Get a pointer to the maple node slots.
768 * @mn: The maple node
769 * @mt: The maple node type
771 * Return: A pointer to the maple node slots
773 static inline void __rcu
**ma_slots(struct maple_node
*mn
, enum maple_type mt
)
776 case maple_arange_64
:
777 return mn
->ma64
.slot
;
780 return mn
->mr64
.slot
;
788 static inline bool mt_write_locked(const struct maple_tree
*mt
)
790 return mt_external_lock(mt
) ? mt_write_lock_is_held(mt
) :
791 lockdep_is_held(&mt
->ma_lock
);
794 static __always_inline
bool mt_locked(const struct maple_tree
*mt
)
796 return mt_external_lock(mt
) ? mt_lock_is_held(mt
) :
797 lockdep_is_held(&mt
->ma_lock
);
800 static __always_inline
void *mt_slot(const struct maple_tree
*mt
,
801 void __rcu
**slots
, unsigned char offset
)
803 return rcu_dereference_check(slots
[offset
], mt_locked(mt
));
806 static __always_inline
void *mt_slot_locked(struct maple_tree
*mt
,
807 void __rcu
**slots
, unsigned char offset
)
809 return rcu_dereference_protected(slots
[offset
], mt_write_locked(mt
));
812 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
813 * @mas: The maple state
814 * @slots: The pointer to the slots
815 * @offset: The offset into the slots array to fetch
817 * Return: The entry stored in @slots at the @offset.
819 static __always_inline
void *mas_slot_locked(struct ma_state
*mas
,
820 void __rcu
**slots
, unsigned char offset
)
822 return mt_slot_locked(mas
->tree
, slots
, offset
);
826 * mas_slot() - Get the slot value when not holding the maple tree lock.
827 * @mas: The maple state
828 * @slots: The pointer to the slots
829 * @offset: The offset into the slots array to fetch
831 * Return: The entry stored in @slots at the @offset
833 static __always_inline
void *mas_slot(struct ma_state
*mas
, void __rcu
**slots
,
834 unsigned char offset
)
836 return mt_slot(mas
->tree
, slots
, offset
);
840 * mas_root() - Get the maple tree root.
841 * @mas: The maple state.
843 * Return: The pointer to the root of the tree
845 static __always_inline
void *mas_root(struct ma_state
*mas
)
847 return rcu_dereference_check(mas
->tree
->ma_root
, mt_locked(mas
->tree
));
850 static inline void *mt_root_locked(struct maple_tree
*mt
)
852 return rcu_dereference_protected(mt
->ma_root
, mt_write_locked(mt
));
856 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
857 * @mas: The maple state.
859 * Return: The pointer to the root of the tree
861 static inline void *mas_root_locked(struct ma_state
*mas
)
863 return mt_root_locked(mas
->tree
);
866 static inline struct maple_metadata
*ma_meta(struct maple_node
*mn
,
870 case maple_arange_64
:
871 return &mn
->ma64
.meta
;
873 return &mn
->mr64
.meta
;
878 * ma_set_meta() - Set the metadata information of a node.
879 * @mn: The maple node
880 * @mt: The maple node type
881 * @offset: The offset of the highest sub-gap in this node.
882 * @end: The end of the data in this node.
884 static inline void ma_set_meta(struct maple_node
*mn
, enum maple_type mt
,
885 unsigned char offset
, unsigned char end
)
887 struct maple_metadata
*meta
= ma_meta(mn
, mt
);
894 * mt_clear_meta() - clear the metadata information of a node, if it exists
895 * @mt: The maple tree
896 * @mn: The maple node
897 * @type: The maple node type
899 static inline void mt_clear_meta(struct maple_tree
*mt
, struct maple_node
*mn
,
900 enum maple_type type
)
902 struct maple_metadata
*meta
;
903 unsigned long *pivots
;
909 pivots
= mn
->mr64
.pivot
;
910 if (unlikely(pivots
[MAPLE_RANGE64_SLOTS
- 2])) {
911 slots
= mn
->mr64
.slot
;
912 next
= mt_slot_locked(mt
, slots
,
913 MAPLE_RANGE64_SLOTS
- 1);
914 if (unlikely((mte_to_node(next
) &&
915 mte_node_type(next
))))
916 return; /* no metadata, could be node */
919 case maple_arange_64
:
920 meta
= ma_meta(mn
, type
);
931 * ma_meta_end() - Get the data end of a node from the metadata
932 * @mn: The maple node
933 * @mt: The maple node type
935 static inline unsigned char ma_meta_end(struct maple_node
*mn
,
938 struct maple_metadata
*meta
= ma_meta(mn
, mt
);
944 * ma_meta_gap() - Get the largest gap location of a node from the metadata
945 * @mn: The maple node
947 static inline unsigned char ma_meta_gap(struct maple_node
*mn
)
949 return mn
->ma64
.meta
.gap
;
953 * ma_set_meta_gap() - Set the largest gap location in a nodes metadata
954 * @mn: The maple node
955 * @mt: The maple node type
956 * @offset: The location of the largest gap.
958 static inline void ma_set_meta_gap(struct maple_node
*mn
, enum maple_type mt
,
959 unsigned char offset
)
962 struct maple_metadata
*meta
= ma_meta(mn
, mt
);
968 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
969 * @mat: the ma_topiary, a linked list of dead nodes.
970 * @dead_enode: the node to be marked as dead and added to the tail of the list
972 * Add the @dead_enode to the linked list in @mat.
974 static inline void mat_add(struct ma_topiary
*mat
,
975 struct maple_enode
*dead_enode
)
977 mte_set_node_dead(dead_enode
);
978 mte_to_mat(dead_enode
)->next
= NULL
;
980 mat
->tail
= mat
->head
= dead_enode
;
984 mte_to_mat(mat
->tail
)->next
= dead_enode
;
985 mat
->tail
= dead_enode
;
988 static void mt_free_walk(struct rcu_head
*head
);
989 static void mt_destroy_walk(struct maple_enode
*enode
, struct maple_tree
*mt
,
992 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
993 * @mas: the maple state
994 * @mat: the ma_topiary linked list of dead nodes to free.
996 * Destroy walk a dead list.
998 static void mas_mat_destroy(struct ma_state
*mas
, struct ma_topiary
*mat
)
1000 struct maple_enode
*next
;
1001 struct maple_node
*node
;
1002 bool in_rcu
= mt_in_rcu(mas
->tree
);
1005 next
= mte_to_mat(mat
->head
)->next
;
1006 node
= mte_to_node(mat
->head
);
1007 mt_destroy_walk(mat
->head
, mas
->tree
, !in_rcu
);
1009 call_rcu(&node
->rcu
, mt_free_walk
);
1014 * mas_descend() - Descend into the slot stored in the ma_state.
1015 * @mas: the maple state.
1017 * Note: Not RCU safe, only use in write side or debug code.
1019 static inline void mas_descend(struct ma_state
*mas
)
1021 enum maple_type type
;
1022 unsigned long *pivots
;
1023 struct maple_node
*node
;
1027 type
= mte_node_type(mas
->node
);
1028 pivots
= ma_pivots(node
, type
);
1029 slots
= ma_slots(node
, type
);
1032 mas
->min
= pivots
[mas
->offset
- 1] + 1;
1033 mas
->max
= mas_safe_pivot(mas
, pivots
, mas
->offset
, type
);
1034 mas
->node
= mas_slot(mas
, slots
, mas
->offset
);
1038 * mte_set_gap() - Set a maple node gap.
1039 * @mn: The encoded maple node
1040 * @gap: The offset of the gap to set
1041 * @val: The gap value
1043 static inline void mte_set_gap(const struct maple_enode
*mn
,
1044 unsigned char gap
, unsigned long val
)
1046 switch (mte_node_type(mn
)) {
1049 case maple_arange_64
:
1050 mte_to_node(mn
)->ma64
.gap
[gap
] = val
;
1056 * mas_ascend() - Walk up a level of the tree.
1057 * @mas: The maple state
1059 * Sets the @mas->max and @mas->min to the correct values when walking up. This
1060 * may cause several levels of walking up to find the correct min and max.
1061 * May find a dead node which will cause a premature return.
1062 * Return: 1 on dead node, 0 otherwise
1064 static int mas_ascend(struct ma_state
*mas
)
1066 struct maple_enode
*p_enode
; /* parent enode. */
1067 struct maple_enode
*a_enode
; /* ancestor enode. */
1068 struct maple_node
*a_node
; /* ancestor node. */
1069 struct maple_node
*p_node
; /* parent node. */
1070 unsigned char a_slot
;
1071 enum maple_type a_type
;
1072 unsigned long min
, max
;
1073 unsigned long *pivots
;
1074 bool set_max
= false, set_min
= false;
1076 a_node
= mas_mn(mas
);
1077 if (ma_is_root(a_node
)) {
1082 p_node
= mte_parent(mas
->node
);
1083 if (unlikely(a_node
== p_node
))
1086 a_type
= mas_parent_type(mas
, mas
->node
);
1087 mas
->offset
= mte_parent_slot(mas
->node
);
1088 a_enode
= mt_mk_node(p_node
, a_type
);
1090 /* Check to make sure all parent information is still accurate */
1091 if (p_node
!= mte_parent(mas
->node
))
1094 mas
->node
= a_enode
;
1096 if (mte_is_root(a_enode
)) {
1097 mas
->max
= ULONG_MAX
;
1109 if (mas
->max
== ULONG_MAX
)
1114 a_type
= mas_parent_type(mas
, p_enode
);
1115 a_node
= mte_parent(p_enode
);
1116 a_slot
= mte_parent_slot(p_enode
);
1117 a_enode
= mt_mk_node(a_node
, a_type
);
1118 pivots
= ma_pivots(a_node
, a_type
);
1120 if (unlikely(ma_dead_node(a_node
)))
1123 if (!set_min
&& a_slot
) {
1125 min
= pivots
[a_slot
- 1] + 1;
1128 if (!set_max
&& a_slot
< mt_pivots
[a_type
]) {
1130 max
= pivots
[a_slot
];
1133 if (unlikely(ma_dead_node(a_node
)))
1136 if (unlikely(ma_is_root(a_node
)))
1139 } while (!set_min
|| !set_max
);
1147 * mas_pop_node() - Get a previously allocated maple node from the maple state.
1148 * @mas: The maple state
1150 * Return: A pointer to a maple node.
1152 static inline struct maple_node
*mas_pop_node(struct ma_state
*mas
)
1154 struct maple_alloc
*ret
, *node
= mas
->alloc
;
1155 unsigned long total
= mas_allocated(mas
);
1156 unsigned int req
= mas_alloc_req(mas
);
1158 /* nothing or a request pending. */
1159 if (WARN_ON(!total
))
1163 /* single allocation in this ma_state */
1169 if (node
->node_count
== 1) {
1170 /* Single allocation in this node. */
1171 mas
->alloc
= node
->slot
[0];
1172 mas
->alloc
->total
= node
->total
- 1;
1177 ret
= node
->slot
[--node
->node_count
];
1178 node
->slot
[node
->node_count
] = NULL
;
1184 mas_set_alloc_req(mas
, req
);
1187 memset(ret
, 0, sizeof(*ret
));
1188 return (struct maple_node
*)ret
;
1192 * mas_push_node() - Push a node back on the maple state allocation.
1193 * @mas: The maple state
1194 * @used: The used maple node
1196 * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
1197 * requested node count as necessary.
1199 static inline void mas_push_node(struct ma_state
*mas
, struct maple_node
*used
)
1201 struct maple_alloc
*reuse
= (struct maple_alloc
*)used
;
1202 struct maple_alloc
*head
= mas
->alloc
;
1203 unsigned long count
;
1204 unsigned int requested
= mas_alloc_req(mas
);
1206 count
= mas_allocated(mas
);
1208 reuse
->request_count
= 0;
1209 reuse
->node_count
= 0;
1211 if (head
->node_count
< MAPLE_ALLOC_SLOTS
) {
1212 head
->slot
[head
->node_count
++] = reuse
;
1216 reuse
->slot
[0] = head
;
1217 reuse
->node_count
= 1;
1220 reuse
->total
= count
+ 1;
1224 mas_set_alloc_req(mas
, requested
- 1);
1228 * mas_alloc_nodes() - Allocate nodes into a maple state
1229 * @mas: The maple state
1230 * @gfp: The GFP Flags
1232 static inline void mas_alloc_nodes(struct ma_state
*mas
, gfp_t gfp
)
1234 struct maple_alloc
*node
;
1235 unsigned long allocated
= mas_allocated(mas
);
1236 unsigned int requested
= mas_alloc_req(mas
);
1238 void **slots
= NULL
;
1239 unsigned int max_req
= 0;
1244 mas_set_alloc_req(mas
, 0);
1245 if (mas
->mas_flags
& MA_STATE_PREALLOC
) {
1249 WARN_ON(!allocated
);
1252 if (!allocated
|| mas
->alloc
->node_count
== MAPLE_ALLOC_SLOTS
) {
1253 node
= (struct maple_alloc
*)mt_alloc_one(gfp
);
1258 node
->slot
[0] = mas
->alloc
;
1259 node
->node_count
= 1;
1261 node
->node_count
= 0;
1265 node
->total
= ++allocated
;
1266 node
->request_count
= 0;
1272 max_req
= MAPLE_ALLOC_SLOTS
- node
->node_count
;
1273 slots
= (void **)&node
->slot
[node
->node_count
];
1274 max_req
= min(requested
, max_req
);
1275 count
= mt_alloc_bulk(gfp
, max_req
, slots
);
1279 if (node
->node_count
== 0) {
1280 node
->slot
[0]->node_count
= 0;
1281 node
->slot
[0]->request_count
= 0;
1284 node
->node_count
+= count
;
1286 /* find a non-full node*/
1288 node
= node
->slot
[0];
1289 } while (unlikely(node
->node_count
== MAPLE_ALLOC_SLOTS
));
1292 mas
->alloc
->total
= allocated
;
1296 /* Clean up potential freed allocations on bulk failure */
1297 memset(slots
, 0, max_req
* sizeof(unsigned long));
1298 mas
->alloc
->total
= allocated
;
1300 mas_set_alloc_req(mas
, requested
);
1301 mas_set_err(mas
, -ENOMEM
);
1305 * mas_free() - Free an encoded maple node
1306 * @mas: The maple state
1307 * @used: The encoded maple node to free.
1309 * Uses rcu free if necessary, pushes @used back on the maple state allocations
1312 static inline void mas_free(struct ma_state
*mas
, struct maple_enode
*used
)
1314 struct maple_node
*tmp
= mte_to_node(used
);
1316 if (mt_in_rcu(mas
->tree
))
1319 mas_push_node(mas
, tmp
);
1323 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
1324 * if there is not enough nodes.
1325 * @mas: The maple state
1326 * @count: The number of nodes needed
1327 * @gfp: the gfp flags
1329 static void mas_node_count_gfp(struct ma_state
*mas
, int count
, gfp_t gfp
)
1331 unsigned long allocated
= mas_allocated(mas
);
1333 if (allocated
< count
) {
1334 mas_set_alloc_req(mas
, count
- allocated
);
1335 mas_alloc_nodes(mas
, gfp
);
1340 * mas_node_count() - Check if enough nodes are allocated and request more if
1341 * there is not enough nodes.
1342 * @mas: The maple state
1343 * @count: The number of nodes needed
1345 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1347 static void mas_node_count(struct ma_state
*mas
, int count
)
1349 return mas_node_count_gfp(mas
, count
, GFP_NOWAIT
| __GFP_NOWARN
);
1353 * mas_start() - Sets up maple state for operations.
1354 * @mas: The maple state.
1356 * If mas->status == mas_start, then set the min, max and depth to
1360 * - If mas->node is an error or not mas_start, return NULL.
1361 * - If it's an empty tree: NULL & mas->status == ma_none
1362 * - If it's a single entry: The entry & mas->status == ma_root
1363 * - If it's a tree: NULL & mas->status == ma_active
1365 static inline struct maple_enode
*mas_start(struct ma_state
*mas
)
1367 if (likely(mas_is_start(mas
))) {
1368 struct maple_enode
*root
;
1371 mas
->max
= ULONG_MAX
;
1375 root
= mas_root(mas
);
1376 /* Tree with nodes */
1377 if (likely(xa_is_node(root
))) {
1379 mas
->status
= ma_active
;
1380 mas
->node
= mte_safe_root(root
);
1382 if (mte_dead_node(mas
->node
))
1390 if (unlikely(!root
)) {
1391 mas
->status
= ma_none
;
1392 mas
->offset
= MAPLE_NODE_SLOTS
;
1396 /* Single entry tree */
1397 mas
->status
= ma_root
;
1398 mas
->offset
= MAPLE_NODE_SLOTS
;
1400 /* Single entry tree. */
1411 * ma_data_end() - Find the end of the data in a node.
1412 * @node: The maple node
1413 * @type: The maple node type
1414 * @pivots: The array of pivots in the node
1415 * @max: The maximum value in the node
1417 * Uses metadata to find the end of the data when possible.
1418 * Return: The zero indexed last slot with data (may be null).
1420 static __always_inline
unsigned char ma_data_end(struct maple_node
*node
,
1421 enum maple_type type
, unsigned long *pivots
, unsigned long max
)
1423 unsigned char offset
;
1428 if (type
== maple_arange_64
)
1429 return ma_meta_end(node
, type
);
1431 offset
= mt_pivots
[type
] - 1;
1432 if (likely(!pivots
[offset
]))
1433 return ma_meta_end(node
, type
);
1435 if (likely(pivots
[offset
] == max
))
1438 return mt_pivots
[type
];
1442 * mas_data_end() - Find the end of the data (slot).
1443 * @mas: the maple state
1445 * This method is optimized to check the metadata of a node if the node type
1446 * supports data end metadata.
1448 * Return: The zero indexed last slot with data (may be null).
1450 static inline unsigned char mas_data_end(struct ma_state
*mas
)
1452 enum maple_type type
;
1453 struct maple_node
*node
;
1454 unsigned char offset
;
1455 unsigned long *pivots
;
1457 type
= mte_node_type(mas
->node
);
1459 if (type
== maple_arange_64
)
1460 return ma_meta_end(node
, type
);
1462 pivots
= ma_pivots(node
, type
);
1463 if (unlikely(ma_dead_node(node
)))
1466 offset
= mt_pivots
[type
] - 1;
1467 if (likely(!pivots
[offset
]))
1468 return ma_meta_end(node
, type
);
1470 if (likely(pivots
[offset
] == mas
->max
))
1473 return mt_pivots
[type
];
1477 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1478 * @mas: the maple state
1480 * Return: The maximum gap in the leaf.
1482 static unsigned long mas_leaf_max_gap(struct ma_state
*mas
)
1485 unsigned long pstart
, gap
, max_gap
;
1486 struct maple_node
*mn
;
1487 unsigned long *pivots
;
1490 unsigned char max_piv
;
1492 mt
= mte_node_type(mas
->node
);
1494 slots
= ma_slots(mn
, mt
);
1496 if (unlikely(ma_is_dense(mt
))) {
1498 for (i
= 0; i
< mt_slots
[mt
]; i
++) {
1513 * Check the first implied pivot optimizes the loop below and slot 1 may
1514 * be skipped if there is a gap in slot 0.
1516 pivots
= ma_pivots(mn
, mt
);
1517 if (likely(!slots
[0])) {
1518 max_gap
= pivots
[0] - mas
->min
+ 1;
1524 /* reduce max_piv as the special case is checked before the loop */
1525 max_piv
= ma_data_end(mn
, mt
, pivots
, mas
->max
) - 1;
1527 * Check end implied pivot which can only be a gap on the right most
1530 if (unlikely(mas
->max
== ULONG_MAX
) && !slots
[max_piv
+ 1]) {
1531 gap
= ULONG_MAX
- pivots
[max_piv
];
1535 if (max_gap
> pivots
[max_piv
] - mas
->min
)
1539 for (; i
<= max_piv
; i
++) {
1540 /* data == no gap. */
1541 if (likely(slots
[i
]))
1544 pstart
= pivots
[i
- 1];
1545 gap
= pivots
[i
] - pstart
;
1549 /* There cannot be two gaps in a row. */
1556 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1557 * @node: The maple node
1558 * @gaps: The pointer to the gaps
1559 * @mt: The maple node type
1560 * @off: Pointer to store the offset location of the gap.
1562 * Uses the metadata data end to scan backwards across set gaps.
1564 * Return: The maximum gap value
1566 static inline unsigned long
1567 ma_max_gap(struct maple_node
*node
, unsigned long *gaps
, enum maple_type mt
,
1570 unsigned char offset
, i
;
1571 unsigned long max_gap
= 0;
1573 i
= offset
= ma_meta_end(node
, mt
);
1575 if (gaps
[i
] > max_gap
) {
1586 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1587 * @mas: The maple state.
1589 * Return: The gap value.
1591 static inline unsigned long mas_max_gap(struct ma_state
*mas
)
1593 unsigned long *gaps
;
1594 unsigned char offset
;
1596 struct maple_node
*node
;
1598 mt
= mte_node_type(mas
->node
);
1600 return mas_leaf_max_gap(mas
);
1603 MAS_BUG_ON(mas
, mt
!= maple_arange_64
);
1604 offset
= ma_meta_gap(node
);
1605 gaps
= ma_gaps(node
, mt
);
1606 return gaps
[offset
];
1610 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1611 * @mas: The maple state
1612 * @offset: The gap offset in the parent to set
1613 * @new: The new gap value.
1615 * Set the parent gap then continue to set the gap upwards, using the metadata
1616 * of the parent to see if it is necessary to check the node above.
1618 static inline void mas_parent_gap(struct ma_state
*mas
, unsigned char offset
,
1621 unsigned long meta_gap
= 0;
1622 struct maple_node
*pnode
;
1623 struct maple_enode
*penode
;
1624 unsigned long *pgaps
;
1625 unsigned char meta_offset
;
1626 enum maple_type pmt
;
1628 pnode
= mte_parent(mas
->node
);
1629 pmt
= mas_parent_type(mas
, mas
->node
);
1630 penode
= mt_mk_node(pnode
, pmt
);
1631 pgaps
= ma_gaps(pnode
, pmt
);
1634 MAS_BUG_ON(mas
, pmt
!= maple_arange_64
);
1635 meta_offset
= ma_meta_gap(pnode
);
1636 meta_gap
= pgaps
[meta_offset
];
1638 pgaps
[offset
] = new;
1640 if (meta_gap
== new)
1643 if (offset
!= meta_offset
) {
1647 ma_set_meta_gap(pnode
, pmt
, offset
);
1648 } else if (new < meta_gap
) {
1649 new = ma_max_gap(pnode
, pgaps
, pmt
, &meta_offset
);
1650 ma_set_meta_gap(pnode
, pmt
, meta_offset
);
1653 if (ma_is_root(pnode
))
1656 /* Go to the parent node. */
1657 pnode
= mte_parent(penode
);
1658 pmt
= mas_parent_type(mas
, penode
);
1659 pgaps
= ma_gaps(pnode
, pmt
);
1660 offset
= mte_parent_slot(penode
);
1661 penode
= mt_mk_node(pnode
, pmt
);
1666 * mas_update_gap() - Update a nodes gaps and propagate up if necessary.
1667 * @mas: the maple state.
1669 static inline void mas_update_gap(struct ma_state
*mas
)
1671 unsigned char pslot
;
1672 unsigned long p_gap
;
1673 unsigned long max_gap
;
1675 if (!mt_is_alloc(mas
->tree
))
1678 if (mte_is_root(mas
->node
))
1681 max_gap
= mas_max_gap(mas
);
1683 pslot
= mte_parent_slot(mas
->node
);
1684 p_gap
= ma_gaps(mte_parent(mas
->node
),
1685 mas_parent_type(mas
, mas
->node
))[pslot
];
1687 if (p_gap
!= max_gap
)
1688 mas_parent_gap(mas
, pslot
, max_gap
);
1692 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1693 * @parent with the slot encoded.
1694 * @mas: the maple state (for the tree)
1695 * @parent: the maple encoded node containing the children.
1697 static inline void mas_adopt_children(struct ma_state
*mas
,
1698 struct maple_enode
*parent
)
1700 enum maple_type type
= mte_node_type(parent
);
1701 struct maple_node
*node
= mte_to_node(parent
);
1702 void __rcu
**slots
= ma_slots(node
, type
);
1703 unsigned long *pivots
= ma_pivots(node
, type
);
1704 struct maple_enode
*child
;
1705 unsigned char offset
;
1707 offset
= ma_data_end(node
, type
, pivots
, mas
->max
);
1709 child
= mas_slot_locked(mas
, slots
, offset
);
1710 mas_set_parent(mas
, child
, parent
, offset
);
1715 * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
1717 * @mas: the maple state with the new node
1718 * @old_enode: The old maple encoded node to replace.
1720 static inline void mas_put_in_tree(struct ma_state
*mas
,
1721 struct maple_enode
*old_enode
)
1722 __must_hold(mas
->tree
->ma_lock
)
1724 unsigned char offset
;
1727 if (mte_is_root(mas
->node
)) {
1728 mas_mn(mas
)->parent
= ma_parent_ptr(mas_tree_parent(mas
));
1729 rcu_assign_pointer(mas
->tree
->ma_root
, mte_mk_root(mas
->node
));
1730 mas_set_height(mas
);
1733 offset
= mte_parent_slot(mas
->node
);
1734 slots
= ma_slots(mte_parent(mas
->node
),
1735 mas_parent_type(mas
, mas
->node
));
1736 rcu_assign_pointer(slots
[offset
], mas
->node
);
1739 mte_set_node_dead(old_enode
);
1743 * mas_replace_node() - Replace a node by putting it in the tree, marking it
1744 * dead, and freeing it.
1745 * the parent encoding to locate the maple node in the tree.
1746 * @mas: the ma_state with @mas->node pointing to the new node.
1747 * @old_enode: The old maple encoded node.
1749 static inline void mas_replace_node(struct ma_state
*mas
,
1750 struct maple_enode
*old_enode
)
1751 __must_hold(mas
->tree
->ma_lock
)
1753 mas_put_in_tree(mas
, old_enode
);
1754 mas_free(mas
, old_enode
);
1758 * mas_find_child() - Find a child who has the parent @mas->node.
1759 * @mas: the maple state with the parent.
1760 * @child: the maple state to store the child.
1762 static inline bool mas_find_child(struct ma_state
*mas
, struct ma_state
*child
)
1763 __must_hold(mas
->tree
->ma_lock
)
1766 unsigned char offset
;
1768 unsigned long *pivots
;
1769 struct maple_enode
*entry
;
1770 struct maple_node
*node
;
1773 mt
= mte_node_type(mas
->node
);
1775 slots
= ma_slots(node
, mt
);
1776 pivots
= ma_pivots(node
, mt
);
1777 end
= ma_data_end(node
, mt
, pivots
, mas
->max
);
1778 for (offset
= mas
->offset
; offset
<= end
; offset
++) {
1779 entry
= mas_slot_locked(mas
, slots
, offset
);
1780 if (mte_parent(entry
) == node
) {
1782 mas
->offset
= offset
+ 1;
1783 child
->offset
= offset
;
1793 * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
1794 * old data or set b_node->b_end.
1795 * @b_node: the maple_big_node
1796 * @shift: the shift count
1798 static inline void mab_shift_right(struct maple_big_node
*b_node
,
1799 unsigned char shift
)
1801 unsigned long size
= b_node
->b_end
* sizeof(unsigned long);
1803 memmove(b_node
->pivot
+ shift
, b_node
->pivot
, size
);
1804 memmove(b_node
->slot
+ shift
, b_node
->slot
, size
);
1805 if (b_node
->type
== maple_arange_64
)
1806 memmove(b_node
->gap
+ shift
, b_node
->gap
, size
);
1810 * mab_middle_node() - Check if a middle node is needed (unlikely)
1811 * @b_node: the maple_big_node that contains the data.
1812 * @split: the potential split location
1813 * @slot_count: the size that can be stored in a single node being considered.
1815 * Return: true if a middle node is required.
1817 static inline bool mab_middle_node(struct maple_big_node
*b_node
, int split
,
1818 unsigned char slot_count
)
1820 unsigned char size
= b_node
->b_end
;
1822 if (size
>= 2 * slot_count
)
1825 if (!b_node
->slot
[split
] && (size
>= 2 * slot_count
- 1))
1832 * mab_no_null_split() - ensure the split doesn't fall on a NULL
1833 * @b_node: the maple_big_node with the data
1834 * @split: the suggested split location
1835 * @slot_count: the number of slots in the node being considered.
1837 * Return: the split location.
1839 static inline int mab_no_null_split(struct maple_big_node
*b_node
,
1840 unsigned char split
, unsigned char slot_count
)
1842 if (!b_node
->slot
[split
]) {
1844 * If the split is less than the max slot && the right side will
1845 * still be sufficient, then increment the split on NULL.
1847 if ((split
< slot_count
- 1) &&
1848 (b_node
->b_end
- split
) > (mt_min_slots
[b_node
->type
]))
1857 * mab_calc_split() - Calculate the split location and if there needs to be two
1859 * @mas: The maple state
1860 * @bn: The maple_big_node with the data
1861 * @mid_split: The second split, if required. 0 otherwise.
1863 * Return: The first split location. The middle split is set in @mid_split.
1865 static inline int mab_calc_split(struct ma_state
*mas
,
1866 struct maple_big_node
*bn
, unsigned char *mid_split
, unsigned long min
)
1868 unsigned char b_end
= bn
->b_end
;
1869 int split
= b_end
/ 2; /* Assume equal split. */
1870 unsigned char slot_min
, slot_count
= mt_slots
[bn
->type
];
1873 * To support gap tracking, all NULL entries are kept together and a node cannot
1874 * end on a NULL entry, with the exception of the left-most leaf. The
1875 * limitation means that the split of a node must be checked for this condition
1876 * and be able to put more data in one direction or the other.
1878 if (unlikely((mas
->mas_flags
& MA_STATE_BULK
))) {
1880 split
= b_end
- mt_min_slots
[bn
->type
];
1882 if (!ma_is_leaf(bn
->type
))
1885 mas
->mas_flags
|= MA_STATE_REBALANCE
;
1886 if (!bn
->slot
[split
])
1892 * Although extremely rare, it is possible to enter what is known as the 3-way
1893 * split scenario. The 3-way split comes about by means of a store of a range
1894 * that overwrites the end and beginning of two full nodes. The result is a set
1895 * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
1896 * also be located in different parent nodes which are also full. This can
1897 * carry upwards all the way to the root in the worst case.
1899 if (unlikely(mab_middle_node(bn
, split
, slot_count
))) {
1901 *mid_split
= split
* 2;
1903 slot_min
= mt_min_slots
[bn
->type
];
1907 * Avoid having a range less than the slot count unless it
1908 * causes one node to be deficient.
1909 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
1911 while ((split
< slot_count
- 1) &&
1912 ((bn
->pivot
[split
] - min
) < slot_count
- 1) &&
1913 (b_end
- split
> slot_min
))
1917 /* Avoid ending a node on a NULL entry */
1918 split
= mab_no_null_split(bn
, split
, slot_count
);
1920 if (unlikely(*mid_split
))
1921 *mid_split
= mab_no_null_split(bn
, *mid_split
, slot_count
);
1927 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1928 * and set @b_node->b_end to the next free slot.
1929 * @mas: The maple state
1930 * @mas_start: The starting slot to copy
1931 * @mas_end: The end slot to copy (inclusively)
1932 * @b_node: The maple_big_node to place the data
1933 * @mab_start: The starting location in maple_big_node to store the data.
1935 static inline void mas_mab_cp(struct ma_state
*mas
, unsigned char mas_start
,
1936 unsigned char mas_end
, struct maple_big_node
*b_node
,
1937 unsigned char mab_start
)
1940 struct maple_node
*node
;
1942 unsigned long *pivots
, *gaps
;
1943 int i
= mas_start
, j
= mab_start
;
1944 unsigned char piv_end
;
1947 mt
= mte_node_type(mas
->node
);
1948 pivots
= ma_pivots(node
, mt
);
1950 b_node
->pivot
[j
] = pivots
[i
++];
1951 if (unlikely(i
> mas_end
))
1956 piv_end
= min(mas_end
, mt_pivots
[mt
]);
1957 for (; i
< piv_end
; i
++, j
++) {
1958 b_node
->pivot
[j
] = pivots
[i
];
1959 if (unlikely(!b_node
->pivot
[j
]))
1962 if (unlikely(mas
->max
== b_node
->pivot
[j
]))
1966 b_node
->pivot
[j
] = mas_safe_pivot(mas
, pivots
, i
, mt
);
1969 b_node
->b_end
= ++j
;
1971 slots
= ma_slots(node
, mt
);
1972 memcpy(b_node
->slot
+ mab_start
, slots
+ mas_start
, sizeof(void *) * j
);
1973 if (!ma_is_leaf(mt
) && mt_is_alloc(mas
->tree
)) {
1974 gaps
= ma_gaps(node
, mt
);
1975 memcpy(b_node
->gap
+ mab_start
, gaps
+ mas_start
,
1976 sizeof(unsigned long) * j
);
1981 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1982 * @node: The maple node
1983 * @mt: The maple type
1984 * @end: The node end
1986 static inline void mas_leaf_set_meta(struct maple_node
*node
,
1987 enum maple_type mt
, unsigned char end
)
1989 if (end
< mt_slots
[mt
] - 1)
1990 ma_set_meta(node
, mt
, 0, end
);
1994 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
1995 * @b_node: the maple_big_node that has the data
1996 * @mab_start: the start location in @b_node.
1997 * @mab_end: The end location in @b_node (inclusively)
1998 * @mas: The maple state with the maple encoded node.
2000 static inline void mab_mas_cp(struct maple_big_node
*b_node
,
2001 unsigned char mab_start
, unsigned char mab_end
,
2002 struct ma_state
*mas
, bool new_max
)
2005 enum maple_type mt
= mte_node_type(mas
->node
);
2006 struct maple_node
*node
= mte_to_node(mas
->node
);
2007 void __rcu
**slots
= ma_slots(node
, mt
);
2008 unsigned long *pivots
= ma_pivots(node
, mt
);
2009 unsigned long *gaps
= NULL
;
2012 if (mab_end
- mab_start
> mt_pivots
[mt
])
2015 if (!pivots
[mt_pivots
[mt
] - 1])
2016 slots
[mt_pivots
[mt
]] = NULL
;
2020 pivots
[j
++] = b_node
->pivot
[i
++];
2021 } while (i
<= mab_end
&& likely(b_node
->pivot
[i
]));
2023 memcpy(slots
, b_node
->slot
+ mab_start
,
2024 sizeof(void *) * (i
- mab_start
));
2027 mas
->max
= b_node
->pivot
[i
- 1];
2030 if (likely(!ma_is_leaf(mt
) && mt_is_alloc(mas
->tree
))) {
2031 unsigned long max_gap
= 0;
2032 unsigned char offset
= 0;
2034 gaps
= ma_gaps(node
, mt
);
2036 gaps
[--j
] = b_node
->gap
[--i
];
2037 if (gaps
[j
] > max_gap
) {
2043 ma_set_meta(node
, mt
, offset
, end
);
2045 mas_leaf_set_meta(node
, mt
, end
);
2050 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2051 * @mas: The maple state
2052 * @end: The maple node end
2053 * @mt: The maple node type
2055 static inline void mas_bulk_rebalance(struct ma_state
*mas
, unsigned char end
,
2058 if (!(mas
->mas_flags
& MA_STATE_BULK
))
2061 if (mte_is_root(mas
->node
))
2064 if (end
> mt_min_slots
[mt
]) {
2065 mas
->mas_flags
&= ~MA_STATE_REBALANCE
;
2071 * mas_store_b_node() - Store an @entry into the b_node while also copying the
2072 * data from a maple encoded node.
2073 * @wr_mas: the maple write state
2074 * @b_node: the maple_big_node to fill with data
2075 * @offset_end: the offset to end copying
2077 * Return: The actual end of the data stored in @b_node
2079 static noinline_for_kasan
void mas_store_b_node(struct ma_wr_state
*wr_mas
,
2080 struct maple_big_node
*b_node
, unsigned char offset_end
)
2083 unsigned char b_end
;
2084 /* Possible underflow of piv will wrap back to 0 before use. */
2086 struct ma_state
*mas
= wr_mas
->mas
;
2088 b_node
->type
= wr_mas
->type
;
2092 /* Copy start data up to insert. */
2093 mas_mab_cp(mas
, 0, slot
- 1, b_node
, 0);
2094 b_end
= b_node
->b_end
;
2095 piv
= b_node
->pivot
[b_end
- 1];
2099 if (piv
+ 1 < mas
->index
) {
2100 /* Handle range starting after old range */
2101 b_node
->slot
[b_end
] = wr_mas
->content
;
2102 if (!wr_mas
->content
)
2103 b_node
->gap
[b_end
] = mas
->index
- 1 - piv
;
2104 b_node
->pivot
[b_end
++] = mas
->index
- 1;
2107 /* Store the new entry. */
2108 mas
->offset
= b_end
;
2109 b_node
->slot
[b_end
] = wr_mas
->entry
;
2110 b_node
->pivot
[b_end
] = mas
->last
;
2113 if (mas
->last
>= mas
->max
)
2116 /* Handle new range ending before old range ends */
2117 piv
= mas_safe_pivot(mas
, wr_mas
->pivots
, offset_end
, wr_mas
->type
);
2118 if (piv
> mas
->last
) {
2119 if (piv
== ULONG_MAX
)
2120 mas_bulk_rebalance(mas
, b_node
->b_end
, wr_mas
->type
);
2122 if (offset_end
!= slot
)
2123 wr_mas
->content
= mas_slot_locked(mas
, wr_mas
->slots
,
2126 b_node
->slot
[++b_end
] = wr_mas
->content
;
2127 if (!wr_mas
->content
)
2128 b_node
->gap
[b_end
] = piv
- mas
->last
+ 1;
2129 b_node
->pivot
[b_end
] = piv
;
2132 slot
= offset_end
+ 1;
2133 if (slot
> mas
->end
)
2136 /* Copy end data to the end of the node. */
2137 mas_mab_cp(mas
, slot
, mas
->end
+ 1, b_node
, ++b_end
);
2142 b_node
->b_end
= b_end
;
2146 * mas_prev_sibling() - Find the previous node with the same parent.
2147 * @mas: the maple state
2149 * Return: True if there is a previous sibling, false otherwise.
2151 static inline bool mas_prev_sibling(struct ma_state
*mas
)
2153 unsigned int p_slot
= mte_parent_slot(mas
->node
);
2155 /* For root node, p_slot is set to 0 by mte_parent_slot(). */
2160 mas
->offset
= p_slot
- 1;
2166 * mas_next_sibling() - Find the next node with the same parent.
2167 * @mas: the maple state
2169 * Return: true if there is a next sibling, false otherwise.
2171 static inline bool mas_next_sibling(struct ma_state
*mas
)
2173 MA_STATE(parent
, mas
->tree
, mas
->index
, mas
->last
);
2175 if (mte_is_root(mas
->node
))
2179 mas_ascend(&parent
);
2180 parent
.offset
= mte_parent_slot(mas
->node
) + 1;
2181 if (parent
.offset
> mas_data_end(&parent
))
2190 * mas_node_or_none() - Set the enode and state.
2191 * @mas: the maple state
2192 * @enode: The encoded maple node.
2194 * Set the node to the enode and the status.
2196 static inline void mas_node_or_none(struct ma_state
*mas
,
2197 struct maple_enode
*enode
)
2201 mas
->status
= ma_active
;
2204 mas
->status
= ma_none
;
2209 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2210 * If @mas->index cannot be found within the containing
2211 * node, we traverse to the last entry in the node.
2212 * @wr_mas: The maple write state
2214 * Uses mas_slot_locked() and does not need to worry about dead nodes.
2216 static inline void mas_wr_node_walk(struct ma_wr_state
*wr_mas
)
2218 struct ma_state
*mas
= wr_mas
->mas
;
2219 unsigned char count
, offset
;
2221 if (unlikely(ma_is_dense(wr_mas
->type
))) {
2222 wr_mas
->r_max
= wr_mas
->r_min
= mas
->index
;
2223 mas
->offset
= mas
->index
= mas
->min
;
2227 wr_mas
->node
= mas_mn(wr_mas
->mas
);
2228 wr_mas
->pivots
= ma_pivots(wr_mas
->node
, wr_mas
->type
);
2229 count
= mas
->end
= ma_data_end(wr_mas
->node
, wr_mas
->type
,
2230 wr_mas
->pivots
, mas
->max
);
2231 offset
= mas
->offset
;
2233 while (offset
< count
&& mas
->index
> wr_mas
->pivots
[offset
])
2236 wr_mas
->r_max
= offset
< count
? wr_mas
->pivots
[offset
] : mas
->max
;
2237 wr_mas
->r_min
= mas_safe_min(mas
, wr_mas
->pivots
, offset
);
2238 wr_mas
->offset_end
= mas
->offset
= offset
;
2242 * mast_rebalance_next() - Rebalance against the next node
2243 * @mast: The maple subtree state
2245 static inline void mast_rebalance_next(struct maple_subtree_state
*mast
)
2247 unsigned char b_end
= mast
->bn
->b_end
;
2249 mas_mab_cp(mast
->orig_r
, 0, mt_slot_count(mast
->orig_r
->node
),
2251 mast
->orig_r
->last
= mast
->orig_r
->max
;
2255 * mast_rebalance_prev() - Rebalance against the previous node
2256 * @mast: The maple subtree state
2258 static inline void mast_rebalance_prev(struct maple_subtree_state
*mast
)
2260 unsigned char end
= mas_data_end(mast
->orig_l
) + 1;
2261 unsigned char b_end
= mast
->bn
->b_end
;
2263 mab_shift_right(mast
->bn
, end
);
2264 mas_mab_cp(mast
->orig_l
, 0, end
- 1, mast
->bn
, 0);
2265 mast
->l
->min
= mast
->orig_l
->min
;
2266 mast
->orig_l
->index
= mast
->orig_l
->min
;
2267 mast
->bn
->b_end
= end
+ b_end
;
2268 mast
->l
->offset
+= end
;
2272 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
2273 * the node to the right. Checking the nodes to the right then the left at each
2274 * level upwards until root is reached.
2275 * Data is copied into the @mast->bn.
2276 * @mast: The maple_subtree_state.
2279 bool mast_spanning_rebalance(struct maple_subtree_state
*mast
)
2281 struct ma_state r_tmp
= *mast
->orig_r
;
2282 struct ma_state l_tmp
= *mast
->orig_l
;
2283 unsigned char depth
= 0;
2286 mas_ascend(mast
->orig_r
);
2287 mas_ascend(mast
->orig_l
);
2289 if (mast
->orig_r
->offset
< mas_data_end(mast
->orig_r
)) {
2290 mast
->orig_r
->offset
++;
2292 mas_descend(mast
->orig_r
);
2293 mast
->orig_r
->offset
= 0;
2296 mast_rebalance_next(mast
);
2297 *mast
->orig_l
= l_tmp
;
2299 } else if (mast
->orig_l
->offset
!= 0) {
2300 mast
->orig_l
->offset
--;
2302 mas_descend(mast
->orig_l
);
2303 mast
->orig_l
->offset
=
2304 mas_data_end(mast
->orig_l
);
2307 mast_rebalance_prev(mast
);
2308 *mast
->orig_r
= r_tmp
;
2311 } while (!mte_is_root(mast
->orig_r
->node
));
2313 *mast
->orig_r
= r_tmp
;
2314 *mast
->orig_l
= l_tmp
;
2319 * mast_ascend() - Ascend the original left and right maple states.
2320 * @mast: the maple subtree state.
2322 * Ascend the original left and right sides. Set the offsets to point to the
2323 * data already in the new tree (@mast->l and @mast->r).
2325 static inline void mast_ascend(struct maple_subtree_state
*mast
)
2327 MA_WR_STATE(wr_mas
, mast
->orig_r
, NULL
);
2328 mas_ascend(mast
->orig_l
);
2329 mas_ascend(mast
->orig_r
);
2331 mast
->orig_r
->offset
= 0;
2332 mast
->orig_r
->index
= mast
->r
->max
;
2333 /* last should be larger than or equal to index */
2334 if (mast
->orig_r
->last
< mast
->orig_r
->index
)
2335 mast
->orig_r
->last
= mast
->orig_r
->index
;
2337 wr_mas
.type
= mte_node_type(mast
->orig_r
->node
);
2338 mas_wr_node_walk(&wr_mas
);
2339 /* Set up the left side of things */
2340 mast
->orig_l
->offset
= 0;
2341 mast
->orig_l
->index
= mast
->l
->min
;
2342 wr_mas
.mas
= mast
->orig_l
;
2343 wr_mas
.type
= mte_node_type(mast
->orig_l
->node
);
2344 mas_wr_node_walk(&wr_mas
);
2346 mast
->bn
->type
= wr_mas
.type
;
2350 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2351 * @mas: the maple state with the allocations.
2352 * @b_node: the maple_big_node with the type encoding.
2354 * Use the node type from the maple_big_node to allocate a new node from the
2355 * ma_state. This function exists mainly for code readability.
2357 * Return: A new maple encoded node
2359 static inline struct maple_enode
2360 *mas_new_ma_node(struct ma_state
*mas
, struct maple_big_node
*b_node
)
2362 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas
)), b_node
->type
);
2366 * mas_mab_to_node() - Set up right and middle nodes
2368 * @mas: the maple state that contains the allocations.
2369 * @b_node: the node which contains the data.
2370 * @left: The pointer which will have the left node
2371 * @right: The pointer which may have the right node
2372 * @middle: the pointer which may have the middle node (rare)
2373 * @mid_split: the split location for the middle node
2375 * Return: the split of left.
2377 static inline unsigned char mas_mab_to_node(struct ma_state
*mas
,
2378 struct maple_big_node
*b_node
, struct maple_enode
**left
,
2379 struct maple_enode
**right
, struct maple_enode
**middle
,
2380 unsigned char *mid_split
, unsigned long min
)
2382 unsigned char split
= 0;
2383 unsigned char slot_count
= mt_slots
[b_node
->type
];
2385 *left
= mas_new_ma_node(mas
, b_node
);
2390 if (b_node
->b_end
< slot_count
) {
2391 split
= b_node
->b_end
;
2393 split
= mab_calc_split(mas
, b_node
, mid_split
, min
);
2394 *right
= mas_new_ma_node(mas
, b_node
);
2398 *middle
= mas_new_ma_node(mas
, b_node
);
2405 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2407 * @b_node: the big node to add the entry
2408 * @mas: the maple state to get the pivot (mas->max)
2409 * @entry: the entry to add, if NULL nothing happens.
2411 static inline void mab_set_b_end(struct maple_big_node
*b_node
,
2412 struct ma_state
*mas
,
2418 b_node
->slot
[b_node
->b_end
] = entry
;
2419 if (mt_is_alloc(mas
->tree
))
2420 b_node
->gap
[b_node
->b_end
] = mas_max_gap(mas
);
2421 b_node
->pivot
[b_node
->b_end
++] = mas
->max
;
2425 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2426 * of @mas->node to either @left or @right, depending on @slot and @split
2428 * @mas: the maple state with the node that needs a parent
2429 * @left: possible parent 1
2430 * @right: possible parent 2
2431 * @slot: the slot the mas->node was placed
2432 * @split: the split location between @left and @right
2434 static inline void mas_set_split_parent(struct ma_state
*mas
,
2435 struct maple_enode
*left
,
2436 struct maple_enode
*right
,
2437 unsigned char *slot
, unsigned char split
)
2439 if (mas_is_none(mas
))
2442 if ((*slot
) <= split
)
2443 mas_set_parent(mas
, mas
->node
, left
, *slot
);
2445 mas_set_parent(mas
, mas
->node
, right
, (*slot
) - split
- 1);
2451 * mte_mid_split_check() - Check if the next node passes the mid-split
2452 * @l: Pointer to left encoded maple node.
2453 * @m: Pointer to middle encoded maple node.
2454 * @r: Pointer to right encoded maple node.
2456 * @split: The split location.
2457 * @mid_split: The middle split.
2459 static inline void mte_mid_split_check(struct maple_enode
**l
,
2460 struct maple_enode
**r
,
2461 struct maple_enode
*right
,
2463 unsigned char *split
,
2464 unsigned char mid_split
)
2469 if (slot
< mid_split
)
2478 * mast_set_split_parents() - Helper function to set three nodes parents. Slot
2479 * is taken from @mast->l.
2480 * @mast: the maple subtree state
2481 * @left: the left node
2482 * @right: the right node
2483 * @split: the split location.
2485 static inline void mast_set_split_parents(struct maple_subtree_state
*mast
,
2486 struct maple_enode
*left
,
2487 struct maple_enode
*middle
,
2488 struct maple_enode
*right
,
2489 unsigned char split
,
2490 unsigned char mid_split
)
2493 struct maple_enode
*l
= left
;
2494 struct maple_enode
*r
= right
;
2496 if (mas_is_none(mast
->l
))
2502 slot
= mast
->l
->offset
;
2504 mte_mid_split_check(&l
, &r
, right
, slot
, &split
, mid_split
);
2505 mas_set_split_parent(mast
->l
, l
, r
, &slot
, split
);
2507 mte_mid_split_check(&l
, &r
, right
, slot
, &split
, mid_split
);
2508 mas_set_split_parent(mast
->m
, l
, r
, &slot
, split
);
2510 mte_mid_split_check(&l
, &r
, right
, slot
, &split
, mid_split
);
2511 mas_set_split_parent(mast
->r
, l
, r
, &slot
, split
);
2515 * mas_topiary_node() - Dispose of a single node
2516 * @mas: The maple state for pushing nodes
2517 * @in_rcu: If the tree is in rcu mode
2519 * The node will either be RCU freed or pushed back on the maple state.
2521 static inline void mas_topiary_node(struct ma_state
*mas
,
2522 struct ma_state
*tmp_mas
, bool in_rcu
)
2524 struct maple_node
*tmp
;
2525 struct maple_enode
*enode
;
2527 if (mas_is_none(tmp_mas
))
2530 enode
= tmp_mas
->node
;
2531 tmp
= mte_to_node(enode
);
2532 mte_set_node_dead(enode
);
2536 mas_push_node(mas
, tmp
);
2540 * mas_topiary_replace() - Replace the data with new data, then repair the
2541 * parent links within the new tree. Iterate over the dead sub-tree and collect
2542 * the dead subtrees and topiary the nodes that are no longer of use.
2544 * The new tree will have up to three children with the correct parent. Keep
2545 * track of the new entries as they need to be followed to find the next level
2548 * The old tree will have up to three children with the old parent. Keep track
2549 * of the old entries as they may have more nodes below replaced. Nodes within
2550 * [index, last] are dead subtrees, others need to be freed and followed.
2552 * @mas: The maple state pointing at the new data
2553 * @old_enode: The maple encoded node being replaced
2556 static inline void mas_topiary_replace(struct ma_state
*mas
,
2557 struct maple_enode
*old_enode
)
2559 struct ma_state tmp
[3], tmp_next
[3];
2560 MA_TOPIARY(subtrees
, mas
->tree
);
2564 /* Place data in tree & then mark node as old */
2565 mas_put_in_tree(mas
, old_enode
);
2567 /* Update the parent pointers in the tree */
2570 tmp
[1].status
= ma_none
;
2571 tmp
[2].status
= ma_none
;
2572 while (!mte_is_leaf(tmp
[0].node
)) {
2574 for (i
= 0; i
< 3; i
++) {
2575 if (mas_is_none(&tmp
[i
]))
2579 if (!mas_find_child(&tmp
[i
], &tmp_next
[n
]))
2584 mas_adopt_children(&tmp
[i
], tmp
[i
].node
);
2587 if (MAS_WARN_ON(mas
, n
== 0))
2591 tmp_next
[n
++].status
= ma_none
;
2593 for (i
= 0; i
< 3; i
++)
2594 tmp
[i
] = tmp_next
[i
];
2597 /* Collect the old nodes that need to be discarded */
2598 if (mte_is_leaf(old_enode
))
2599 return mas_free(mas
, old_enode
);
2603 tmp
[0].node
= old_enode
;
2604 tmp
[1].status
= ma_none
;
2605 tmp
[2].status
= ma_none
;
2606 in_rcu
= mt_in_rcu(mas
->tree
);
2609 for (i
= 0; i
< 3; i
++) {
2610 if (mas_is_none(&tmp
[i
]))
2614 if (!mas_find_child(&tmp
[i
], &tmp_next
[n
]))
2617 if ((tmp_next
[n
].min
>= tmp_next
->index
) &&
2618 (tmp_next
[n
].max
<= tmp_next
->last
)) {
2619 mat_add(&subtrees
, tmp_next
[n
].node
);
2620 tmp_next
[n
].status
= ma_none
;
2627 if (MAS_WARN_ON(mas
, n
== 0))
2631 tmp_next
[n
++].status
= ma_none
;
2633 for (i
= 0; i
< 3; i
++) {
2634 mas_topiary_node(mas
, &tmp
[i
], in_rcu
);
2635 tmp
[i
] = tmp_next
[i
];
2637 } while (!mte_is_leaf(tmp
[0].node
));
2639 for (i
= 0; i
< 3; i
++)
2640 mas_topiary_node(mas
, &tmp
[i
], in_rcu
);
2642 mas_mat_destroy(mas
, &subtrees
);
2646 * mas_wmb_replace() - Write memory barrier and replace
2647 * @mas: The maple state
2648 * @old_enode: The old maple encoded node that is being replaced.
2650 * Updates gap as necessary.
2652 static inline void mas_wmb_replace(struct ma_state
*mas
,
2653 struct maple_enode
*old_enode
)
2655 /* Insert the new data in the tree */
2656 mas_topiary_replace(mas
, old_enode
);
2658 if (mte_is_leaf(mas
->node
))
2661 mas_update_gap(mas
);
2665 * mast_cp_to_nodes() - Copy data out to nodes.
2666 * @mast: The maple subtree state
2667 * @left: The left encoded maple node
2668 * @middle: The middle encoded maple node
2669 * @right: The right encoded maple node
2670 * @split: The location to split between left and (middle ? middle : right)
2671 * @mid_split: The location to split between middle and right.
2673 static inline void mast_cp_to_nodes(struct maple_subtree_state
*mast
,
2674 struct maple_enode
*left
, struct maple_enode
*middle
,
2675 struct maple_enode
*right
, unsigned char split
, unsigned char mid_split
)
2677 bool new_lmax
= true;
2679 mas_node_or_none(mast
->l
, left
);
2680 mas_node_or_none(mast
->m
, middle
);
2681 mas_node_or_none(mast
->r
, right
);
2683 mast
->l
->min
= mast
->orig_l
->min
;
2684 if (split
== mast
->bn
->b_end
) {
2685 mast
->l
->max
= mast
->orig_r
->max
;
2689 mab_mas_cp(mast
->bn
, 0, split
, mast
->l
, new_lmax
);
2692 mab_mas_cp(mast
->bn
, 1 + split
, mid_split
, mast
->m
, true);
2693 mast
->m
->min
= mast
->bn
->pivot
[split
] + 1;
2697 mast
->r
->max
= mast
->orig_r
->max
;
2699 mab_mas_cp(mast
->bn
, 1 + split
, mast
->bn
->b_end
, mast
->r
, false);
2700 mast
->r
->min
= mast
->bn
->pivot
[split
] + 1;
2705 * mast_combine_cp_left - Copy in the original left side of the tree into the
2706 * combined data set in the maple subtree state big node.
2707 * @mast: The maple subtree state
2709 static inline void mast_combine_cp_left(struct maple_subtree_state
*mast
)
2711 unsigned char l_slot
= mast
->orig_l
->offset
;
2716 mas_mab_cp(mast
->orig_l
, 0, l_slot
- 1, mast
->bn
, 0);
2720 * mast_combine_cp_right: Copy in the original right side of the tree into the
2721 * combined data set in the maple subtree state big node.
2722 * @mast: The maple subtree state
2724 static inline void mast_combine_cp_right(struct maple_subtree_state
*mast
)
2726 if (mast
->bn
->pivot
[mast
->bn
->b_end
- 1] >= mast
->orig_r
->max
)
2729 mas_mab_cp(mast
->orig_r
, mast
->orig_r
->offset
+ 1,
2730 mt_slot_count(mast
->orig_r
->node
), mast
->bn
,
2732 mast
->orig_r
->last
= mast
->orig_r
->max
;
2736 * mast_sufficient: Check if the maple subtree state has enough data in the big
2737 * node to create at least one sufficient node
2738 * @mast: the maple subtree state
2740 static inline bool mast_sufficient(struct maple_subtree_state
*mast
)
2742 if (mast
->bn
->b_end
> mt_min_slot_count(mast
->orig_l
->node
))
2749 * mast_overflow: Check if there is too much data in the subtree state for a
2751 * @mast: The maple subtree state
2753 static inline bool mast_overflow(struct maple_subtree_state
*mast
)
2755 if (mast
->bn
->b_end
>= mt_slot_count(mast
->orig_l
->node
))
2761 static inline void *mtree_range_walk(struct ma_state
*mas
)
2763 unsigned long *pivots
;
2764 unsigned char offset
;
2765 struct maple_node
*node
;
2766 struct maple_enode
*next
, *last
;
2767 enum maple_type type
;
2770 unsigned long max
, min
;
2771 unsigned long prev_max
, prev_min
;
2778 node
= mte_to_node(next
);
2779 type
= mte_node_type(next
);
2780 pivots
= ma_pivots(node
, type
);
2781 end
= ma_data_end(node
, type
, pivots
, max
);
2784 if (pivots
[0] >= mas
->index
) {
2791 while (offset
< end
) {
2792 if (pivots
[offset
] >= mas
->index
) {
2793 max
= pivots
[offset
];
2799 min
= pivots
[offset
- 1] + 1;
2801 slots
= ma_slots(node
, type
);
2802 next
= mt_slot(mas
->tree
, slots
, offset
);
2803 if (unlikely(ma_dead_node(node
)))
2805 } while (!ma_is_leaf(type
));
2808 mas
->offset
= offset
;
2811 mas
->min
= prev_min
;
2812 mas
->max
= prev_max
;
2814 return (void *)next
;
2822 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2823 * @mas: The starting maple state
2824 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2825 * @count: The estimated count of iterations needed.
2827 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2828 * is hit. First @b_node is split into two entries which are inserted into the
2829 * next iteration of the loop. @b_node is returned populated with the final
2830 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
2831 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2832 * to account of what has been copied into the new sub-tree. The update of
2833 * orig_l_mas->last is used in mas_consume to find the slots that will need to
2834 * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
2835 * the new sub-tree in case the sub-tree becomes the full tree.
2837 static void mas_spanning_rebalance(struct ma_state
*mas
,
2838 struct maple_subtree_state
*mast
, unsigned char count
)
2840 unsigned char split
, mid_split
;
2841 unsigned char slot
= 0;
2842 struct maple_enode
*left
= NULL
, *middle
= NULL
, *right
= NULL
;
2843 struct maple_enode
*old_enode
;
2845 MA_STATE(l_mas
, mas
->tree
, mas
->index
, mas
->index
);
2846 MA_STATE(r_mas
, mas
->tree
, mas
->index
, mas
->last
);
2847 MA_STATE(m_mas
, mas
->tree
, mas
->index
, mas
->index
);
2850 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2851 * Rebalancing is done by use of the ``struct maple_topiary``.
2856 l_mas
.status
= r_mas
.status
= m_mas
.status
= ma_none
;
2858 /* Check if this is not root and has sufficient data. */
2859 if (((mast
->orig_l
->min
!= 0) || (mast
->orig_r
->max
!= ULONG_MAX
)) &&
2860 unlikely(mast
->bn
->b_end
<= mt_min_slots
[mast
->bn
->type
]))
2861 mast_spanning_rebalance(mast
);
2866 * Each level of the tree is examined and balanced, pushing data to the left or
2867 * right, or rebalancing against left or right nodes is employed to avoid
2868 * rippling up the tree to limit the amount of churn. Once a new sub-section of
2869 * the tree is created, there may be a mix of new and old nodes. The old nodes
2870 * will have the incorrect parent pointers and currently be in two trees: the
2871 * original tree and the partially new tree. To remedy the parent pointers in
2872 * the old tree, the new data is swapped into the active tree and a walk down
2873 * the tree is performed and the parent pointers are updated.
2874 * See mas_topiary_replace() for more information.
2878 mast
->bn
->type
= mte_node_type(mast
->orig_l
->node
);
2879 split
= mas_mab_to_node(mas
, mast
->bn
, &left
, &right
, &middle
,
2880 &mid_split
, mast
->orig_l
->min
);
2881 mast_set_split_parents(mast
, left
, middle
, right
, split
,
2883 mast_cp_to_nodes(mast
, left
, middle
, right
, split
, mid_split
);
2886 * Copy data from next level in the tree to mast->bn from next
2889 memset(mast
->bn
, 0, sizeof(struct maple_big_node
));
2890 mast
->bn
->type
= mte_node_type(left
);
2893 /* Root already stored in l->node. */
2894 if (mas_is_root_limits(mast
->l
))
2898 mast_combine_cp_left(mast
);
2899 l_mas
.offset
= mast
->bn
->b_end
;
2900 mab_set_b_end(mast
->bn
, &l_mas
, left
);
2901 mab_set_b_end(mast
->bn
, &m_mas
, middle
);
2902 mab_set_b_end(mast
->bn
, &r_mas
, right
);
2904 /* Copy anything necessary out of the right node. */
2905 mast_combine_cp_right(mast
);
2906 mast
->orig_l
->last
= mast
->orig_l
->max
;
2908 if (mast_sufficient(mast
))
2911 if (mast_overflow(mast
))
2914 /* May be a new root stored in mast->bn */
2915 if (mas_is_root_limits(mast
->orig_l
))
2918 mast_spanning_rebalance(mast
);
2920 /* rebalancing from other nodes may require another loop. */
2925 l_mas
.node
= mt_mk_node(ma_mnode_ptr(mas_pop_node(mas
)),
2926 mte_node_type(mast
->orig_l
->node
));
2928 mab_mas_cp(mast
->bn
, 0, mt_slots
[mast
->bn
->type
] - 1, &l_mas
, true);
2929 mas_set_parent(mas
, left
, l_mas
.node
, slot
);
2931 mas_set_parent(mas
, middle
, l_mas
.node
, ++slot
);
2934 mas_set_parent(mas
, right
, l_mas
.node
, ++slot
);
2936 if (mas_is_root_limits(mast
->l
)) {
2938 mas_mn(mast
->l
)->parent
= ma_parent_ptr(mas_tree_parent(mas
));
2939 while (!mte_is_root(mast
->orig_l
->node
))
2942 mas_mn(&l_mas
)->parent
= mas_mn(mast
->orig_l
)->parent
;
2945 old_enode
= mast
->orig_l
->node
;
2946 mas
->depth
= l_mas
.depth
;
2947 mas
->node
= l_mas
.node
;
2948 mas
->min
= l_mas
.min
;
2949 mas
->max
= l_mas
.max
;
2950 mas
->offset
= l_mas
.offset
;
2951 mas_wmb_replace(mas
, old_enode
);
2952 mtree_range_walk(mas
);
2957 * mas_rebalance() - Rebalance a given node.
2958 * @mas: The maple state
2959 * @b_node: The big maple node.
2961 * Rebalance two nodes into a single node or two new nodes that are sufficient.
2962 * Continue upwards until tree is sufficient.
2964 static inline void mas_rebalance(struct ma_state
*mas
,
2965 struct maple_big_node
*b_node
)
2967 char empty_count
= mas_mt_height(mas
);
2968 struct maple_subtree_state mast
;
2969 unsigned char shift
, b_end
= ++b_node
->b_end
;
2971 MA_STATE(l_mas
, mas
->tree
, mas
->index
, mas
->last
);
2972 MA_STATE(r_mas
, mas
->tree
, mas
->index
, mas
->last
);
2974 trace_ma_op(__func__
, mas
);
2977 * Rebalancing occurs if a node is insufficient. Data is rebalanced
2978 * against the node to the right if it exists, otherwise the node to the
2979 * left of this node is rebalanced against this node. If rebalancing
2980 * causes just one node to be produced instead of two, then the parent
2981 * is also examined and rebalanced if it is insufficient. Every level
2982 * tries to combine the data in the same way. If one node contains the
2983 * entire range of the tree, then that node is used as a new root node.
2986 mast
.orig_l
= &l_mas
;
2987 mast
.orig_r
= &r_mas
;
2989 mast
.bn
->type
= mte_node_type(mas
->node
);
2991 l_mas
= r_mas
= *mas
;
2993 if (mas_next_sibling(&r_mas
)) {
2994 mas_mab_cp(&r_mas
, 0, mt_slot_count(r_mas
.node
), b_node
, b_end
);
2995 r_mas
.last
= r_mas
.index
= r_mas
.max
;
2997 mas_prev_sibling(&l_mas
);
2998 shift
= mas_data_end(&l_mas
) + 1;
2999 mab_shift_right(b_node
, shift
);
3000 mas
->offset
+= shift
;
3001 mas_mab_cp(&l_mas
, 0, shift
- 1, b_node
, 0);
3002 b_node
->b_end
= shift
+ b_end
;
3003 l_mas
.index
= l_mas
.last
= l_mas
.min
;
3006 return mas_spanning_rebalance(mas
, &mast
, empty_count
);
3010 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3012 * @mas: The maple state
3013 * @end: The end of the left-most node.
3015 * During a mass-insert event (such as forking), it may be necessary to
3016 * rebalance the left-most node when it is not sufficient.
3018 static inline void mas_destroy_rebalance(struct ma_state
*mas
, unsigned char end
)
3020 enum maple_type mt
= mte_node_type(mas
->node
);
3021 struct maple_node reuse
, *newnode
, *parent
, *new_left
, *left
, *node
;
3022 struct maple_enode
*eparent
, *old_eparent
;
3023 unsigned char offset
, tmp
, split
= mt_slots
[mt
] / 2;
3024 void __rcu
**l_slots
, **slots
;
3025 unsigned long *l_pivs
, *pivs
, gap
;
3026 bool in_rcu
= mt_in_rcu(mas
->tree
);
3028 MA_STATE(l_mas
, mas
->tree
, mas
->index
, mas
->last
);
3031 mas_prev_sibling(&l_mas
);
3035 newnode
= mas_pop_node(mas
);
3041 newnode
->parent
= node
->parent
;
3042 slots
= ma_slots(newnode
, mt
);
3043 pivs
= ma_pivots(newnode
, mt
);
3044 left
= mas_mn(&l_mas
);
3045 l_slots
= ma_slots(left
, mt
);
3046 l_pivs
= ma_pivots(left
, mt
);
3047 if (!l_slots
[split
])
3049 tmp
= mas_data_end(&l_mas
) - split
;
3051 memcpy(slots
, l_slots
+ split
+ 1, sizeof(void *) * tmp
);
3052 memcpy(pivs
, l_pivs
+ split
+ 1, sizeof(unsigned long) * tmp
);
3053 pivs
[tmp
] = l_mas
.max
;
3054 memcpy(slots
+ tmp
, ma_slots(node
, mt
), sizeof(void *) * end
);
3055 memcpy(pivs
+ tmp
, ma_pivots(node
, mt
), sizeof(unsigned long) * end
);
3057 l_mas
.max
= l_pivs
[split
];
3058 mas
->min
= l_mas
.max
+ 1;
3059 old_eparent
= mt_mk_node(mte_parent(l_mas
.node
),
3060 mas_parent_type(&l_mas
, l_mas
.node
));
3063 unsigned char max_p
= mt_pivots
[mt
];
3064 unsigned char max_s
= mt_slots
[mt
];
3067 memset(pivs
+ tmp
, 0,
3068 sizeof(unsigned long) * (max_p
- tmp
));
3070 if (tmp
< mt_slots
[mt
])
3071 memset(slots
+ tmp
, 0, sizeof(void *) * (max_s
- tmp
));
3073 memcpy(node
, newnode
, sizeof(struct maple_node
));
3074 ma_set_meta(node
, mt
, 0, tmp
- 1);
3075 mte_set_pivot(old_eparent
, mte_parent_slot(l_mas
.node
),
3078 /* Remove data from l_pivs. */
3080 memset(l_pivs
+ tmp
, 0, sizeof(unsigned long) * (max_p
- tmp
));
3081 memset(l_slots
+ tmp
, 0, sizeof(void *) * (max_s
- tmp
));
3082 ma_set_meta(left
, mt
, 0, split
);
3083 eparent
= old_eparent
;
3088 /* RCU requires replacing both l_mas, mas, and parent. */
3089 mas
->node
= mt_mk_node(newnode
, mt
);
3090 ma_set_meta(newnode
, mt
, 0, tmp
);
3092 new_left
= mas_pop_node(mas
);
3093 new_left
->parent
= left
->parent
;
3094 mt
= mte_node_type(l_mas
.node
);
3095 slots
= ma_slots(new_left
, mt
);
3096 pivs
= ma_pivots(new_left
, mt
);
3097 memcpy(slots
, l_slots
, sizeof(void *) * split
);
3098 memcpy(pivs
, l_pivs
, sizeof(unsigned long) * split
);
3099 ma_set_meta(new_left
, mt
, 0, split
);
3100 l_mas
.node
= mt_mk_node(new_left
, mt
);
3102 /* replace parent. */
3103 offset
= mte_parent_slot(mas
->node
);
3104 mt
= mas_parent_type(&l_mas
, l_mas
.node
);
3105 parent
= mas_pop_node(mas
);
3106 slots
= ma_slots(parent
, mt
);
3107 pivs
= ma_pivots(parent
, mt
);
3108 memcpy(parent
, mte_to_node(old_eparent
), sizeof(struct maple_node
));
3109 rcu_assign_pointer(slots
[offset
], mas
->node
);
3110 rcu_assign_pointer(slots
[offset
- 1], l_mas
.node
);
3111 pivs
[offset
- 1] = l_mas
.max
;
3112 eparent
= mt_mk_node(parent
, mt
);
3114 gap
= mas_leaf_max_gap(mas
);
3115 mte_set_gap(eparent
, mte_parent_slot(mas
->node
), gap
);
3116 gap
= mas_leaf_max_gap(&l_mas
);
3117 mte_set_gap(eparent
, mte_parent_slot(l_mas
.node
), gap
);
3121 mas_replace_node(mas
, old_eparent
);
3122 mas_adopt_children(mas
, mas
->node
);
3125 mas_update_gap(mas
);
3129 * mas_split_final_node() - Split the final node in a subtree operation.
3130 * @mast: the maple subtree state
3131 * @mas: The maple state
3132 * @height: The height of the tree in case it's a new root.
3134 static inline void mas_split_final_node(struct maple_subtree_state
*mast
,
3135 struct ma_state
*mas
, int height
)
3137 struct maple_enode
*ancestor
;
3139 if (mte_is_root(mas
->node
)) {
3140 if (mt_is_alloc(mas
->tree
))
3141 mast
->bn
->type
= maple_arange_64
;
3143 mast
->bn
->type
= maple_range_64
;
3144 mas
->depth
= height
;
3147 * Only a single node is used here, could be root.
3148 * The Big_node data should just fit in a single node.
3150 ancestor
= mas_new_ma_node(mas
, mast
->bn
);
3151 mas_set_parent(mas
, mast
->l
->node
, ancestor
, mast
->l
->offset
);
3152 mas_set_parent(mas
, mast
->r
->node
, ancestor
, mast
->r
->offset
);
3153 mte_to_node(ancestor
)->parent
= mas_mn(mas
)->parent
;
3155 mast
->l
->node
= ancestor
;
3156 mab_mas_cp(mast
->bn
, 0, mt_slots
[mast
->bn
->type
] - 1, mast
->l
, true);
3157 mas
->offset
= mast
->bn
->b_end
- 1;
3161 * mast_fill_bnode() - Copy data into the big node in the subtree state
3162 * @mast: The maple subtree state
3163 * @mas: the maple state
3164 * @skip: The number of entries to skip for new nodes insertion.
3166 static inline void mast_fill_bnode(struct maple_subtree_state
*mast
,
3167 struct ma_state
*mas
,
3171 unsigned char split
;
3173 memset(mast
->bn
, 0, sizeof(struct maple_big_node
));
3175 if (mte_is_root(mas
->node
)) {
3179 mas
->offset
= mte_parent_slot(mas
->node
);
3182 if (cp
&& mast
->l
->offset
)
3183 mas_mab_cp(mas
, 0, mast
->l
->offset
- 1, mast
->bn
, 0);
3185 split
= mast
->bn
->b_end
;
3186 mab_set_b_end(mast
->bn
, mast
->l
, mast
->l
->node
);
3187 mast
->r
->offset
= mast
->bn
->b_end
;
3188 mab_set_b_end(mast
->bn
, mast
->r
, mast
->r
->node
);
3189 if (mast
->bn
->pivot
[mast
->bn
->b_end
- 1] == mas
->max
)
3193 mas_mab_cp(mas
, split
+ skip
, mt_slot_count(mas
->node
) - 1,
3194 mast
->bn
, mast
->bn
->b_end
);
3197 mast
->bn
->type
= mte_node_type(mas
->node
);
3201 * mast_split_data() - Split the data in the subtree state big node into regular
3203 * @mast: The maple subtree state
3204 * @mas: The maple state
3205 * @split: The location to split the big node
3207 static inline void mast_split_data(struct maple_subtree_state
*mast
,
3208 struct ma_state
*mas
, unsigned char split
)
3210 unsigned char p_slot
;
3212 mab_mas_cp(mast
->bn
, 0, split
, mast
->l
, true);
3213 mte_set_pivot(mast
->r
->node
, 0, mast
->r
->max
);
3214 mab_mas_cp(mast
->bn
, split
+ 1, mast
->bn
->b_end
, mast
->r
, false);
3215 mast
->l
->offset
= mte_parent_slot(mas
->node
);
3216 mast
->l
->max
= mast
->bn
->pivot
[split
];
3217 mast
->r
->min
= mast
->l
->max
+ 1;
3218 if (mte_is_leaf(mas
->node
))
3221 p_slot
= mast
->orig_l
->offset
;
3222 mas_set_split_parent(mast
->orig_l
, mast
->l
->node
, mast
->r
->node
,
3224 mas_set_split_parent(mast
->orig_r
, mast
->l
->node
, mast
->r
->node
,
3229 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3230 * data to the right or left node if there is room.
3231 * @mas: The maple state
3232 * @height: The current height of the maple state
3233 * @mast: The maple subtree state
3234 * @left: Push left or not.
3236 * Keeping the height of the tree low means faster lookups.
3238 * Return: True if pushed, false otherwise.
3240 static inline bool mas_push_data(struct ma_state
*mas
, int height
,
3241 struct maple_subtree_state
*mast
, bool left
)
3243 unsigned char slot_total
= mast
->bn
->b_end
;
3244 unsigned char end
, space
, split
;
3246 MA_STATE(tmp_mas
, mas
->tree
, mas
->index
, mas
->last
);
3248 tmp_mas
.depth
= mast
->l
->depth
;
3250 if (left
&& !mas_prev_sibling(&tmp_mas
))
3252 else if (!left
&& !mas_next_sibling(&tmp_mas
))
3255 end
= mas_data_end(&tmp_mas
);
3257 space
= 2 * mt_slot_count(mas
->node
) - 2;
3258 /* -2 instead of -1 to ensure there isn't a triple split */
3259 if (ma_is_leaf(mast
->bn
->type
))
3262 if (mas
->max
== ULONG_MAX
)
3265 if (slot_total
>= space
)
3268 /* Get the data; Fill mast->bn */
3271 mab_shift_right(mast
->bn
, end
+ 1);
3272 mas_mab_cp(&tmp_mas
, 0, end
, mast
->bn
, 0);
3273 mast
->bn
->b_end
= slot_total
+ 1;
3275 mas_mab_cp(&tmp_mas
, 0, end
, mast
->bn
, mast
->bn
->b_end
);
3278 /* Configure mast for splitting of mast->bn */
3279 split
= mt_slots
[mast
->bn
->type
] - 2;
3281 /* Switch mas to prev node */
3283 /* Start using mast->l for the left side. */
3284 tmp_mas
.node
= mast
->l
->node
;
3287 tmp_mas
.node
= mast
->r
->node
;
3289 split
= slot_total
- split
;
3291 split
= mab_no_null_split(mast
->bn
, split
, mt_slots
[mast
->bn
->type
]);
3292 /* Update parent slot for split calculation. */
3294 mast
->orig_l
->offset
+= end
+ 1;
3296 mast_split_data(mast
, mas
, split
);
3297 mast_fill_bnode(mast
, mas
, 2);
3298 mas_split_final_node(mast
, mas
, height
+ 1);
3303 * mas_split() - Split data that is too big for one node into two.
3304 * @mas: The maple state
3305 * @b_node: The maple big node
3307 static void mas_split(struct ma_state
*mas
, struct maple_big_node
*b_node
)
3309 struct maple_subtree_state mast
;
3311 unsigned char mid_split
, split
= 0;
3312 struct maple_enode
*old
;
3315 * Splitting is handled differently from any other B-tree; the Maple
3316 * Tree splits upwards. Splitting up means that the split operation
3317 * occurs when the walk of the tree hits the leaves and not on the way
3318 * down. The reason for splitting up is that it is impossible to know
3319 * how much space will be needed until the leaf is (or leaves are)
3320 * reached. Since overwriting data is allowed and a range could
3321 * overwrite more than one range or result in changing one entry into 3
3322 * entries, it is impossible to know if a split is required until the
3325 * Splitting is a balancing act between keeping allocations to a minimum
3326 * and avoiding a 'jitter' event where a tree is expanded to make room
3327 * for an entry followed by a contraction when the entry is removed. To
3328 * accomplish the balance, there are empty slots remaining in both left
3329 * and right nodes after a split.
3331 MA_STATE(l_mas
, mas
->tree
, mas
->index
, mas
->last
);
3332 MA_STATE(r_mas
, mas
->tree
, mas
->index
, mas
->last
);
3333 MA_STATE(prev_l_mas
, mas
->tree
, mas
->index
, mas
->last
);
3334 MA_STATE(prev_r_mas
, mas
->tree
, mas
->index
, mas
->last
);
3336 trace_ma_op(__func__
, mas
);
3337 mas
->depth
= mas_mt_height(mas
);
3341 mast
.orig_l
= &prev_l_mas
;
3342 mast
.orig_r
= &prev_r_mas
;
3345 while (height
++ <= mas
->depth
) {
3346 if (mt_slots
[b_node
->type
] > b_node
->b_end
) {
3347 mas_split_final_node(&mast
, mas
, height
);
3351 l_mas
= r_mas
= *mas
;
3352 l_mas
.node
= mas_new_ma_node(mas
, b_node
);
3353 r_mas
.node
= mas_new_ma_node(mas
, b_node
);
3355 * Another way that 'jitter' is avoided is to terminate a split up early if the
3356 * left or right node has space to spare. This is referred to as "pushing left"
3357 * or "pushing right" and is similar to the B* tree, except the nodes left or
3358 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3359 * is a significant savings.
3361 /* Try to push left. */
3362 if (mas_push_data(mas
, height
, &mast
, true))
3364 /* Try to push right. */
3365 if (mas_push_data(mas
, height
, &mast
, false))
3368 split
= mab_calc_split(mas
, b_node
, &mid_split
, prev_l_mas
.min
);
3369 mast_split_data(&mast
, mas
, split
);
3371 * Usually correct, mab_mas_cp in the above call overwrites
3374 mast
.r
->max
= mas
->max
;
3375 mast_fill_bnode(&mast
, mas
, 1);
3376 prev_l_mas
= *mast
.l
;
3377 prev_r_mas
= *mast
.r
;
3380 /* Set the original node as dead */
3382 mas
->node
= l_mas
.node
;
3383 mas_wmb_replace(mas
, old
);
3384 mtree_range_walk(mas
);
3389 * mas_commit_b_node() - Commit the big node into the tree.
3390 * @wr_mas: The maple write state
3391 * @b_node: The maple big node
3393 static noinline_for_kasan
void mas_commit_b_node(struct ma_wr_state
*wr_mas
,
3394 struct maple_big_node
*b_node
)
3396 enum store_type type
= wr_mas
->mas
->store_type
;
3398 WARN_ON_ONCE(type
!= wr_rebalance
&& type
!= wr_split_store
);
3400 if (type
== wr_rebalance
)
3401 return mas_rebalance(wr_mas
->mas
, b_node
);
3403 return mas_split(wr_mas
->mas
, b_node
);
3407 * mas_root_expand() - Expand a root to a node
3408 * @mas: The maple state
3409 * @entry: The entry to store into the tree
3411 static inline void mas_root_expand(struct ma_state
*mas
, void *entry
)
3413 void *contents
= mas_root_locked(mas
);
3414 enum maple_type type
= maple_leaf_64
;
3415 struct maple_node
*node
;
3417 unsigned long *pivots
;
3420 node
= mas_pop_node(mas
);
3421 pivots
= ma_pivots(node
, type
);
3422 slots
= ma_slots(node
, type
);
3423 node
->parent
= ma_parent_ptr(mas_tree_parent(mas
));
3424 mas
->node
= mt_mk_node(node
, type
);
3425 mas
->status
= ma_active
;
3429 rcu_assign_pointer(slots
[slot
], contents
);
3430 if (likely(mas
->index
> 1))
3433 pivots
[slot
++] = mas
->index
- 1;
3436 rcu_assign_pointer(slots
[slot
], entry
);
3438 pivots
[slot
] = mas
->last
;
3439 if (mas
->last
!= ULONG_MAX
)
3440 pivots
[++slot
] = ULONG_MAX
;
3443 mas_set_height(mas
);
3444 ma_set_meta(node
, maple_leaf_64
, 0, slot
);
3445 /* swap the new root into the tree */
3446 rcu_assign_pointer(mas
->tree
->ma_root
, mte_mk_root(mas
->node
));
3451 * mas_store_root() - Storing value into root.
3452 * @mas: The maple state
3453 * @entry: The entry to store.
3455 * There is no root node now and we are storing a value into the root - this
3456 * function either assigns the pointer or expands into a node.
3458 static inline void mas_store_root(struct ma_state
*mas
, void *entry
)
3462 rcu_assign_pointer(mas
->tree
->ma_root
, NULL
);
3463 } else if (likely((mas
->last
!= 0) || (mas
->index
!= 0)))
3464 mas_root_expand(mas
, entry
);
3465 else if (((unsigned long) (entry
) & 3) == 2)
3466 mas_root_expand(mas
, entry
);
3468 rcu_assign_pointer(mas
->tree
->ma_root
, entry
);
3469 mas
->status
= ma_start
;
3474 * mas_is_span_wr() - Check if the write needs to be treated as a write that
3476 * @wr_mas: The maple write state
3478 * Spanning writes are writes that start in one node and end in another OR if
3479 * the write of a %NULL will cause the node to end with a %NULL.
3481 * Return: True if this is a spanning write, false otherwise.
3483 static bool mas_is_span_wr(struct ma_wr_state
*wr_mas
)
3485 unsigned long max
= wr_mas
->r_max
;
3486 unsigned long last
= wr_mas
->mas
->last
;
3487 enum maple_type type
= wr_mas
->type
;
3488 void *entry
= wr_mas
->entry
;
3490 /* Contained in this pivot, fast path */
3494 if (ma_is_leaf(type
)) {
3495 max
= wr_mas
->mas
->max
;
3502 * The last entry of leaf node cannot be NULL unless it is the
3503 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
3505 if (entry
|| last
== ULONG_MAX
)
3509 trace_ma_write(__func__
, wr_mas
->mas
, wr_mas
->r_max
, entry
);
3513 static inline void mas_wr_walk_descend(struct ma_wr_state
*wr_mas
)
3515 wr_mas
->type
= mte_node_type(wr_mas
->mas
->node
);
3516 mas_wr_node_walk(wr_mas
);
3517 wr_mas
->slots
= ma_slots(wr_mas
->node
, wr_mas
->type
);
3520 static inline void mas_wr_walk_traverse(struct ma_wr_state
*wr_mas
)
3522 wr_mas
->mas
->max
= wr_mas
->r_max
;
3523 wr_mas
->mas
->min
= wr_mas
->r_min
;
3524 wr_mas
->mas
->node
= wr_mas
->content
;
3525 wr_mas
->mas
->offset
= 0;
3526 wr_mas
->mas
->depth
++;
3529 * mas_wr_walk() - Walk the tree for a write.
3530 * @wr_mas: The maple write state
3532 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3534 * Return: True if it's contained in a node, false on spanning write.
3536 static bool mas_wr_walk(struct ma_wr_state
*wr_mas
)
3538 struct ma_state
*mas
= wr_mas
->mas
;
3541 mas_wr_walk_descend(wr_mas
);
3542 if (unlikely(mas_is_span_wr(wr_mas
)))
3545 wr_mas
->content
= mas_slot_locked(mas
, wr_mas
->slots
,
3547 if (ma_is_leaf(wr_mas
->type
))
3550 mas_wr_walk_traverse(wr_mas
);
3556 static void mas_wr_walk_index(struct ma_wr_state
*wr_mas
)
3558 struct ma_state
*mas
= wr_mas
->mas
;
3561 mas_wr_walk_descend(wr_mas
);
3562 wr_mas
->content
= mas_slot_locked(mas
, wr_mas
->slots
,
3564 if (ma_is_leaf(wr_mas
->type
))
3566 mas_wr_walk_traverse(wr_mas
);
3570 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3571 * @l_wr_mas: The left maple write state
3572 * @r_wr_mas: The right maple write state
3574 static inline void mas_extend_spanning_null(struct ma_wr_state
*l_wr_mas
,
3575 struct ma_wr_state
*r_wr_mas
)
3577 struct ma_state
*r_mas
= r_wr_mas
->mas
;
3578 struct ma_state
*l_mas
= l_wr_mas
->mas
;
3579 unsigned char l_slot
;
3581 l_slot
= l_mas
->offset
;
3582 if (!l_wr_mas
->content
)
3583 l_mas
->index
= l_wr_mas
->r_min
;
3585 if ((l_mas
->index
== l_wr_mas
->r_min
) &&
3587 !mas_slot_locked(l_mas
, l_wr_mas
->slots
, l_slot
- 1))) {
3589 l_mas
->index
= l_wr_mas
->pivots
[l_slot
- 2] + 1;
3591 l_mas
->index
= l_mas
->min
;
3593 l_mas
->offset
= l_slot
- 1;
3596 if (!r_wr_mas
->content
) {
3597 if (r_mas
->last
< r_wr_mas
->r_max
)
3598 r_mas
->last
= r_wr_mas
->r_max
;
3600 } else if ((r_mas
->last
== r_wr_mas
->r_max
) &&
3601 (r_mas
->last
< r_mas
->max
) &&
3602 !mas_slot_locked(r_mas
, r_wr_mas
->slots
, r_mas
->offset
+ 1)) {
3603 r_mas
->last
= mas_safe_pivot(r_mas
, r_wr_mas
->pivots
,
3604 r_wr_mas
->type
, r_mas
->offset
+ 1);
3609 static inline void *mas_state_walk(struct ma_state
*mas
)
3613 entry
= mas_start(mas
);
3614 if (mas_is_none(mas
))
3617 if (mas_is_ptr(mas
))
3620 return mtree_range_walk(mas
);
3624 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3627 * @mas: The maple state.
3629 * Note: Leaves mas in undesirable state.
3630 * Return: The entry for @mas->index or %NULL on dead node.
3632 static inline void *mtree_lookup_walk(struct ma_state
*mas
)
3634 unsigned long *pivots
;
3635 unsigned char offset
;
3636 struct maple_node
*node
;
3637 struct maple_enode
*next
;
3638 enum maple_type type
;
3644 node
= mte_to_node(next
);
3645 type
= mte_node_type(next
);
3646 pivots
= ma_pivots(node
, type
);
3647 end
= mt_pivots
[type
];
3650 if (pivots
[offset
] >= mas
->index
)
3652 } while (++offset
< end
);
3654 slots
= ma_slots(node
, type
);
3655 next
= mt_slot(mas
->tree
, slots
, offset
);
3656 if (unlikely(ma_dead_node(node
)))
3658 } while (!ma_is_leaf(type
));
3660 return (void *)next
;
3667 static void mte_destroy_walk(struct maple_enode
*, struct maple_tree
*);
3669 * mas_new_root() - Create a new root node that only contains the entry passed
3671 * @mas: The maple state
3672 * @entry: The entry to store.
3674 * Only valid when the index == 0 and the last == ULONG_MAX
3676 static inline void mas_new_root(struct ma_state
*mas
, void *entry
)
3678 struct maple_enode
*root
= mas_root_locked(mas
);
3679 enum maple_type type
= maple_leaf_64
;
3680 struct maple_node
*node
;
3682 unsigned long *pivots
;
3684 WARN_ON_ONCE(mas
->index
|| mas
->last
!= ULONG_MAX
);
3688 mas_set_height(mas
);
3689 rcu_assign_pointer(mas
->tree
->ma_root
, entry
);
3690 mas
->status
= ma_start
;
3694 node
= mas_pop_node(mas
);
3695 pivots
= ma_pivots(node
, type
);
3696 slots
= ma_slots(node
, type
);
3697 node
->parent
= ma_parent_ptr(mas_tree_parent(mas
));
3698 mas
->node
= mt_mk_node(node
, type
);
3699 mas
->status
= ma_active
;
3700 rcu_assign_pointer(slots
[0], entry
);
3701 pivots
[0] = mas
->last
;
3703 mas_set_height(mas
);
3704 rcu_assign_pointer(mas
->tree
->ma_root
, mte_mk_root(mas
->node
));
3707 if (xa_is_node(root
))
3708 mte_destroy_walk(root
, mas
->tree
);
3713 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3714 * and new nodes where necessary, then place the sub-tree in the actual tree.
3715 * Note that mas is expected to point to the node which caused the store to
3717 * @wr_mas: The maple write state
3719 static noinline
void mas_wr_spanning_store(struct ma_wr_state
*wr_mas
)
3721 struct maple_subtree_state mast
;
3722 struct maple_big_node b_node
;
3723 struct ma_state
*mas
;
3724 unsigned char height
;
3726 /* Left and Right side of spanning store */
3727 MA_STATE(l_mas
, NULL
, 0, 0);
3728 MA_STATE(r_mas
, NULL
, 0, 0);
3729 MA_WR_STATE(r_wr_mas
, &r_mas
, wr_mas
->entry
);
3730 MA_WR_STATE(l_wr_mas
, &l_mas
, wr_mas
->entry
);
3733 * A store operation that spans multiple nodes is called a spanning
3734 * store and is handled early in the store call stack by the function
3735 * mas_is_span_wr(). When a spanning store is identified, the maple
3736 * state is duplicated. The first maple state walks the left tree path
3737 * to ``index``, the duplicate walks the right tree path to ``last``.
3738 * The data in the two nodes are combined into a single node, two nodes,
3739 * or possibly three nodes (see the 3-way split above). A ``NULL``
3740 * written to the last entry of a node is considered a spanning store as
3741 * a rebalance is required for the operation to complete and an overflow
3742 * of data may happen.
3745 trace_ma_op(__func__
, mas
);
3747 if (unlikely(!mas
->index
&& mas
->last
== ULONG_MAX
))
3748 return mas_new_root(mas
, wr_mas
->entry
);
3750 * Node rebalancing may occur due to this store, so there may be three new
3751 * entries per level plus a new root.
3753 height
= mas_mt_height(mas
);
3756 * Set up right side. Need to get to the next offset after the spanning
3757 * store to ensure it's not NULL and to combine both the next node and
3758 * the node with the start together.
3761 /* Avoid overflow, walk to next slot in the tree. */
3765 r_mas
.index
= r_mas
.last
;
3766 mas_wr_walk_index(&r_wr_mas
);
3767 r_mas
.last
= r_mas
.index
= mas
->last
;
3769 /* Set up left side. */
3771 mas_wr_walk_index(&l_wr_mas
);
3773 if (!wr_mas
->entry
) {
3774 mas_extend_spanning_null(&l_wr_mas
, &r_wr_mas
);
3775 mas
->offset
= l_mas
.offset
;
3776 mas
->index
= l_mas
.index
;
3777 mas
->last
= l_mas
.last
= r_mas
.last
;
3780 /* expanding NULLs may make this cover the entire range */
3781 if (!l_mas
.index
&& r_mas
.last
== ULONG_MAX
) {
3782 mas_set_range(mas
, 0, ULONG_MAX
);
3783 return mas_new_root(mas
, wr_mas
->entry
);
3786 memset(&b_node
, 0, sizeof(struct maple_big_node
));
3787 /* Copy l_mas and store the value in b_node. */
3788 mas_store_b_node(&l_wr_mas
, &b_node
, l_mas
.end
);
3789 /* Copy r_mas into b_node if there is anything to copy. */
3790 if (r_mas
.max
> r_mas
.last
)
3791 mas_mab_cp(&r_mas
, r_mas
.offset
, r_mas
.end
,
3792 &b_node
, b_node
.b_end
+ 1);
3796 /* Stop spanning searches by searching for just index. */
3797 l_mas
.index
= l_mas
.last
= mas
->index
;
3800 mast
.orig_l
= &l_mas
;
3801 mast
.orig_r
= &r_mas
;
3802 /* Combine l_mas and r_mas and split them up evenly again. */
3803 return mas_spanning_rebalance(mas
, &mast
, height
+ 1);
3807 * mas_wr_node_store() - Attempt to store the value in a node
3808 * @wr_mas: The maple write state
3810 * Attempts to reuse the node, but may allocate.
3812 static inline void mas_wr_node_store(struct ma_wr_state
*wr_mas
,
3813 unsigned char new_end
)
3815 struct ma_state
*mas
= wr_mas
->mas
;
3816 void __rcu
**dst_slots
;
3817 unsigned long *dst_pivots
;
3818 unsigned char dst_offset
, offset_end
= wr_mas
->offset_end
;
3819 struct maple_node reuse
, *newnode
;
3820 unsigned char copy_size
, node_pivots
= mt_pivots
[wr_mas
->type
];
3821 bool in_rcu
= mt_in_rcu(mas
->tree
);
3823 if (mas
->last
== wr_mas
->end_piv
)
3824 offset_end
++; /* don't copy this offset */
3825 else if (unlikely(wr_mas
->r_max
== ULONG_MAX
))
3826 mas_bulk_rebalance(mas
, mas
->end
, wr_mas
->type
);
3830 newnode
= mas_pop_node(mas
);
3832 memset(&reuse
, 0, sizeof(struct maple_node
));
3836 newnode
->parent
= mas_mn(mas
)->parent
;
3837 dst_pivots
= ma_pivots(newnode
, wr_mas
->type
);
3838 dst_slots
= ma_slots(newnode
, wr_mas
->type
);
3839 /* Copy from start to insert point */
3840 memcpy(dst_pivots
, wr_mas
->pivots
, sizeof(unsigned long) * mas
->offset
);
3841 memcpy(dst_slots
, wr_mas
->slots
, sizeof(void *) * mas
->offset
);
3843 /* Handle insert of new range starting after old range */
3844 if (wr_mas
->r_min
< mas
->index
) {
3845 rcu_assign_pointer(dst_slots
[mas
->offset
], wr_mas
->content
);
3846 dst_pivots
[mas
->offset
++] = mas
->index
- 1;
3849 /* Store the new entry and range end. */
3850 if (mas
->offset
< node_pivots
)
3851 dst_pivots
[mas
->offset
] = mas
->last
;
3852 rcu_assign_pointer(dst_slots
[mas
->offset
], wr_mas
->entry
);
3855 * this range wrote to the end of the node or it overwrote the rest of
3858 if (offset_end
> mas
->end
)
3861 dst_offset
= mas
->offset
+ 1;
3862 /* Copy to the end of node if necessary. */
3863 copy_size
= mas
->end
- offset_end
+ 1;
3864 memcpy(dst_slots
+ dst_offset
, wr_mas
->slots
+ offset_end
,
3865 sizeof(void *) * copy_size
);
3866 memcpy(dst_pivots
+ dst_offset
, wr_mas
->pivots
+ offset_end
,
3867 sizeof(unsigned long) * (copy_size
- 1));
3869 if (new_end
< node_pivots
)
3870 dst_pivots
[new_end
] = mas
->max
;
3873 mas_leaf_set_meta(newnode
, maple_leaf_64
, new_end
);
3875 struct maple_enode
*old_enode
= mas
->node
;
3877 mas
->node
= mt_mk_node(newnode
, wr_mas
->type
);
3878 mas_replace_node(mas
, old_enode
);
3880 memcpy(wr_mas
->node
, newnode
, sizeof(struct maple_node
));
3882 trace_ma_write(__func__
, mas
, 0, wr_mas
->entry
);
3883 mas_update_gap(mas
);
3889 * mas_wr_slot_store: Attempt to store a value in a slot.
3890 * @wr_mas: the maple write state
3892 static inline void mas_wr_slot_store(struct ma_wr_state
*wr_mas
)
3894 struct ma_state
*mas
= wr_mas
->mas
;
3895 unsigned char offset
= mas
->offset
;
3896 void __rcu
**slots
= wr_mas
->slots
;
3899 gap
|= !mt_slot_locked(mas
->tree
, slots
, offset
);
3900 gap
|= !mt_slot_locked(mas
->tree
, slots
, offset
+ 1);
3902 if (wr_mas
->offset_end
- offset
== 1) {
3903 if (mas
->index
== wr_mas
->r_min
) {
3904 /* Overwriting the range and a part of the next one */
3905 rcu_assign_pointer(slots
[offset
], wr_mas
->entry
);
3906 wr_mas
->pivots
[offset
] = mas
->last
;
3908 /* Overwriting a part of the range and the next one */
3909 rcu_assign_pointer(slots
[offset
+ 1], wr_mas
->entry
);
3910 wr_mas
->pivots
[offset
] = mas
->index
- 1;
3911 mas
->offset
++; /* Keep mas accurate. */
3914 WARN_ON_ONCE(mt_in_rcu(mas
->tree
));
3916 * Expand the range, only partially overwriting the previous and
3919 gap
|= !mt_slot_locked(mas
->tree
, slots
, offset
+ 2);
3920 rcu_assign_pointer(slots
[offset
+ 1], wr_mas
->entry
);
3921 wr_mas
->pivots
[offset
] = mas
->index
- 1;
3922 wr_mas
->pivots
[offset
+ 1] = mas
->last
;
3923 mas
->offset
++; /* Keep mas accurate. */
3926 trace_ma_write(__func__
, mas
, 0, wr_mas
->entry
);
3928 * Only update gap when the new entry is empty or there is an empty
3929 * entry in the original two ranges.
3931 if (!wr_mas
->entry
|| gap
)
3932 mas_update_gap(mas
);
3937 static inline void mas_wr_extend_null(struct ma_wr_state
*wr_mas
)
3939 struct ma_state
*mas
= wr_mas
->mas
;
3941 if (!wr_mas
->slots
[wr_mas
->offset_end
]) {
3942 /* If this one is null, the next and prev are not */
3943 mas
->last
= wr_mas
->end_piv
;
3945 /* Check next slot(s) if we are overwriting the end */
3946 if ((mas
->last
== wr_mas
->end_piv
) &&
3947 (mas
->end
!= wr_mas
->offset_end
) &&
3948 !wr_mas
->slots
[wr_mas
->offset_end
+ 1]) {
3949 wr_mas
->offset_end
++;
3950 if (wr_mas
->offset_end
== mas
->end
)
3951 mas
->last
= mas
->max
;
3953 mas
->last
= wr_mas
->pivots
[wr_mas
->offset_end
];
3954 wr_mas
->end_piv
= mas
->last
;
3958 if (!wr_mas
->content
) {
3959 /* If this one is null, the next and prev are not */
3960 mas
->index
= wr_mas
->r_min
;
3962 /* Check prev slot if we are overwriting the start */
3963 if (mas
->index
== wr_mas
->r_min
&& mas
->offset
&&
3964 !wr_mas
->slots
[mas
->offset
- 1]) {
3966 wr_mas
->r_min
= mas
->index
=
3967 mas_safe_min(mas
, wr_mas
->pivots
, mas
->offset
);
3968 wr_mas
->r_max
= wr_mas
->pivots
[mas
->offset
];
3973 static inline void mas_wr_end_piv(struct ma_wr_state
*wr_mas
)
3975 while ((wr_mas
->offset_end
< wr_mas
->mas
->end
) &&
3976 (wr_mas
->mas
->last
> wr_mas
->pivots
[wr_mas
->offset_end
]))
3977 wr_mas
->offset_end
++;
3979 if (wr_mas
->offset_end
< wr_mas
->mas
->end
)
3980 wr_mas
->end_piv
= wr_mas
->pivots
[wr_mas
->offset_end
];
3982 wr_mas
->end_piv
= wr_mas
->mas
->max
;
3985 static inline unsigned char mas_wr_new_end(struct ma_wr_state
*wr_mas
)
3987 struct ma_state
*mas
= wr_mas
->mas
;
3988 unsigned char new_end
= mas
->end
+ 2;
3990 new_end
-= wr_mas
->offset_end
- mas
->offset
;
3991 if (wr_mas
->r_min
== mas
->index
)
3994 if (wr_mas
->end_piv
== mas
->last
)
4001 * mas_wr_append: Attempt to append
4002 * @wr_mas: the maple write state
4003 * @new_end: The end of the node after the modification
4005 * This is currently unsafe in rcu mode since the end of the node may be cached
4006 * by readers while the node contents may be updated which could result in
4007 * inaccurate information.
4009 static inline void mas_wr_append(struct ma_wr_state
*wr_mas
,
4010 unsigned char new_end
)
4012 struct ma_state
*mas
= wr_mas
->mas
;
4014 unsigned char end
= mas
->end
;
4016 if (new_end
< mt_pivots
[wr_mas
->type
]) {
4017 wr_mas
->pivots
[new_end
] = wr_mas
->pivots
[end
];
4018 ma_set_meta(wr_mas
->node
, wr_mas
->type
, 0, new_end
);
4021 slots
= wr_mas
->slots
;
4022 if (new_end
== end
+ 1) {
4023 if (mas
->last
== wr_mas
->r_max
) {
4024 /* Append to end of range */
4025 rcu_assign_pointer(slots
[new_end
], wr_mas
->entry
);
4026 wr_mas
->pivots
[end
] = mas
->index
- 1;
4027 mas
->offset
= new_end
;
4029 /* Append to start of range */
4030 rcu_assign_pointer(slots
[new_end
], wr_mas
->content
);
4031 wr_mas
->pivots
[end
] = mas
->last
;
4032 rcu_assign_pointer(slots
[end
], wr_mas
->entry
);
4035 /* Append to the range without touching any boundaries. */
4036 rcu_assign_pointer(slots
[new_end
], wr_mas
->content
);
4037 wr_mas
->pivots
[end
+ 1] = mas
->last
;
4038 rcu_assign_pointer(slots
[end
+ 1], wr_mas
->entry
);
4039 wr_mas
->pivots
[end
] = mas
->index
- 1;
4040 mas
->offset
= end
+ 1;
4043 if (!wr_mas
->content
|| !wr_mas
->entry
)
4044 mas_update_gap(mas
);
4047 trace_ma_write(__func__
, mas
, new_end
, wr_mas
->entry
);
4052 * mas_wr_bnode() - Slow path for a modification.
4053 * @wr_mas: The write maple state
4055 * This is where split, rebalance end up.
4057 static void mas_wr_bnode(struct ma_wr_state
*wr_mas
)
4059 struct maple_big_node b_node
;
4061 trace_ma_write(__func__
, wr_mas
->mas
, 0, wr_mas
->entry
);
4062 memset(&b_node
, 0, sizeof(struct maple_big_node
));
4063 mas_store_b_node(wr_mas
, &b_node
, wr_mas
->offset_end
);
4064 mas_commit_b_node(wr_mas
, &b_node
);
4068 * mas_wr_store_entry() - Internal call to store a value
4069 * @wr_mas: The maple write state
4071 static inline void mas_wr_store_entry(struct ma_wr_state
*wr_mas
)
4073 struct ma_state
*mas
= wr_mas
->mas
;
4074 unsigned char new_end
= mas_wr_new_end(wr_mas
);
4076 switch (mas
->store_type
) {
4078 MT_BUG_ON(mas
->tree
, 1);
4081 mas_new_root(mas
, wr_mas
->entry
);
4084 mas_store_root(mas
, wr_mas
->entry
);
4087 rcu_assign_pointer(wr_mas
->slots
[mas
->offset
], wr_mas
->entry
);
4088 if (!!wr_mas
->entry
^ !!wr_mas
->content
)
4089 mas_update_gap(mas
);
4092 mas_wr_append(wr_mas
, new_end
);
4095 mas_wr_slot_store(wr_mas
);
4098 mas_wr_node_store(wr_mas
, new_end
);
4100 case wr_spanning_store
:
4101 mas_wr_spanning_store(wr_mas
);
4103 case wr_split_store
:
4105 mas_wr_bnode(wr_mas
);
4112 static inline void mas_wr_prealloc_setup(struct ma_wr_state
*wr_mas
)
4114 struct ma_state
*mas
= wr_mas
->mas
;
4116 if (!mas_is_active(mas
)) {
4117 if (mas_is_start(mas
))
4120 if (unlikely(mas_is_paused(mas
)))
4123 if (unlikely(mas_is_none(mas
)))
4126 if (unlikely(mas_is_overflow(mas
)))
4129 if (unlikely(mas_is_underflow(mas
)))
4134 * A less strict version of mas_is_span_wr() where we allow spanning
4135 * writes within this node. This is to stop partial walks in
4136 * mas_prealloc() from being reset.
4138 if (mas
->last
> mas
->max
)
4144 if (mte_is_leaf(mas
->node
) && mas
->last
== mas
->max
)
4152 wr_mas
->content
= mas_start(mas
);
4156 * mas_prealloc_calc() - Calculate number of nodes needed for a
4157 * given store oepration
4158 * @mas: The maple state
4159 * @entry: The entry to store into the tree
4161 * Return: Number of nodes required for preallocation.
4163 static inline int mas_prealloc_calc(struct ma_state
*mas
, void *entry
)
4165 int ret
= mas_mt_height(mas
) * 3 + 1;
4167 switch (mas
->store_type
) {
4175 if (likely((mas
->last
!= 0) || (mas
->index
!= 0)))
4177 else if (((unsigned long) (entry
) & 3) == 2)
4182 case wr_spanning_store
:
4183 ret
= mas_mt_height(mas
) * 3 + 1;
4185 case wr_split_store
:
4186 ret
= mas_mt_height(mas
) * 2 + 1;
4189 ret
= mas_mt_height(mas
) * 2 - 1;
4192 ret
= mt_in_rcu(mas
->tree
) ? 1 : 0;
4204 * mas_wr_store_type() - Determine the store type for a given
4206 * @wr_mas: The maple write state
4208 * Return: the type of store needed for the operation
4210 static inline enum store_type
mas_wr_store_type(struct ma_wr_state
*wr_mas
)
4212 struct ma_state
*mas
= wr_mas
->mas
;
4213 unsigned char new_end
;
4215 if (unlikely(mas_is_none(mas
) || mas_is_ptr(mas
)))
4216 return wr_store_root
;
4218 if (unlikely(!mas_wr_walk(wr_mas
)))
4219 return wr_spanning_store
;
4221 /* At this point, we are at the leaf node that needs to be altered. */
4222 mas_wr_end_piv(wr_mas
);
4224 mas_wr_extend_null(wr_mas
);
4226 if ((wr_mas
->r_min
== mas
->index
) && (wr_mas
->r_max
== mas
->last
))
4227 return wr_exact_fit
;
4229 if (unlikely(!mas
->index
&& mas
->last
== ULONG_MAX
))
4232 new_end
= mas_wr_new_end(wr_mas
);
4233 /* Potential spanning rebalance collapsing a node */
4234 if (new_end
< mt_min_slots
[wr_mas
->type
]) {
4235 if (!mte_is_root(mas
->node
) && !(mas
->mas_flags
& MA_STATE_BULK
))
4236 return wr_rebalance
;
4237 return wr_node_store
;
4240 if (new_end
>= mt_slots
[wr_mas
->type
])
4241 return wr_split_store
;
4243 if (!mt_in_rcu(mas
->tree
) && (mas
->offset
== mas
->end
))
4246 if ((new_end
== mas
->end
) && (!mt_in_rcu(mas
->tree
) ||
4247 (wr_mas
->offset_end
- mas
->offset
== 1)))
4248 return wr_slot_store
;
4250 return wr_node_store
;
4254 * mas_wr_preallocate() - Preallocate enough nodes for a store operation
4255 * @wr_mas: The maple write state
4256 * @entry: The entry that will be stored
4259 static inline void mas_wr_preallocate(struct ma_wr_state
*wr_mas
, void *entry
)
4261 struct ma_state
*mas
= wr_mas
->mas
;
4264 mas_wr_prealloc_setup(wr_mas
);
4265 mas
->store_type
= mas_wr_store_type(wr_mas
);
4266 request
= mas_prealloc_calc(mas
, entry
);
4270 mas_node_count(mas
, request
);
4274 * mas_insert() - Internal call to insert a value
4275 * @mas: The maple state
4276 * @entry: The entry to store
4278 * Return: %NULL or the contents that already exists at the requested index
4279 * otherwise. The maple state needs to be checked for error conditions.
4281 static inline void *mas_insert(struct ma_state
*mas
, void *entry
)
4283 MA_WR_STATE(wr_mas
, mas
, entry
);
4286 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4287 * tree. If the insert fits exactly into an existing gap with a value
4288 * of NULL, then the slot only needs to be written with the new value.
4289 * If the range being inserted is adjacent to another range, then only a
4290 * single pivot needs to be inserted (as well as writing the entry). If
4291 * the new range is within a gap but does not touch any other ranges,
4292 * then two pivots need to be inserted: the start - 1, and the end. As
4293 * usual, the entry must be written. Most operations require a new node
4294 * to be allocated and replace an existing node to ensure RCU safety,
4295 * when in RCU mode. The exception to requiring a newly allocated node
4296 * is when inserting at the end of a node (appending). When done
4297 * carefully, appending can reuse the node in place.
4299 wr_mas
.content
= mas_start(mas
);
4303 mas_wr_preallocate(&wr_mas
, entry
);
4304 if (mas_is_err(mas
))
4307 /* spanning writes always overwrite something */
4308 if (mas
->store_type
== wr_spanning_store
)
4311 /* At this point, we are at the leaf node that needs to be altered. */
4312 if (mas
->store_type
!= wr_new_root
&& mas
->store_type
!= wr_store_root
) {
4313 wr_mas
.offset_end
= mas
->offset
;
4314 wr_mas
.end_piv
= wr_mas
.r_max
;
4316 if (wr_mas
.content
|| (mas
->last
> wr_mas
.r_max
))
4320 mas_wr_store_entry(&wr_mas
);
4321 return wr_mas
.content
;
4324 mas_set_err(mas
, -EEXIST
);
4325 return wr_mas
.content
;
4330 * mas_alloc_cyclic() - Internal call to find somewhere to store an entry
4331 * @mas: The maple state.
4332 * @startp: Pointer to ID.
4333 * @range_lo: Lower bound of range to search.
4334 * @range_hi: Upper bound of range to search.
4335 * @entry: The entry to store.
4336 * @next: Pointer to next ID to allocate.
4337 * @gfp: The GFP_FLAGS to use for allocations.
4339 * Return: 0 if the allocation succeeded without wrapping, 1 if the
4340 * allocation succeeded after wrapping, or -EBUSY if there are no
4343 int mas_alloc_cyclic(struct ma_state
*mas
, unsigned long *startp
,
4344 void *entry
, unsigned long range_lo
, unsigned long range_hi
,
4345 unsigned long *next
, gfp_t gfp
)
4347 unsigned long min
= range_lo
;
4350 range_lo
= max(min
, *next
);
4351 ret
= mas_empty_area(mas
, range_lo
, range_hi
, 1);
4352 if ((mas
->tree
->ma_flags
& MT_FLAGS_ALLOC_WRAPPED
) && ret
== 0) {
4353 mas
->tree
->ma_flags
&= ~MT_FLAGS_ALLOC_WRAPPED
;
4356 if (ret
< 0 && range_lo
> min
) {
4357 ret
= mas_empty_area(mas
, min
, range_hi
, 1);
4365 mas_insert(mas
, entry
);
4366 } while (mas_nomem(mas
, gfp
));
4367 if (mas_is_err(mas
))
4368 return xa_err(mas
->node
);
4370 *startp
= mas
->index
;
4371 *next
= *startp
+ 1;
4373 mas
->tree
->ma_flags
|= MT_FLAGS_ALLOC_WRAPPED
;
4378 EXPORT_SYMBOL(mas_alloc_cyclic
);
4380 static __always_inline
void mas_rewalk(struct ma_state
*mas
, unsigned long index
)
4383 mas_set(mas
, index
);
4384 mas_state_walk(mas
);
4385 if (mas_is_start(mas
))
4389 static __always_inline
bool mas_rewalk_if_dead(struct ma_state
*mas
,
4390 struct maple_node
*node
, const unsigned long index
)
4392 if (unlikely(ma_dead_node(node
))) {
4393 mas_rewalk(mas
, index
);
4400 * mas_prev_node() - Find the prev non-null entry at the same level in the
4401 * tree. The prev value will be mas->node[mas->offset] or the status will be
4403 * @mas: The maple state
4404 * @min: The lower limit to search
4406 * The prev node value will be mas->node[mas->offset] or the status will be
4408 * Return: 1 if the node is dead, 0 otherwise.
4410 static int mas_prev_node(struct ma_state
*mas
, unsigned long min
)
4415 struct maple_node
*node
;
4416 unsigned long *pivots
;
4429 if (ma_is_root(node
))
4433 if (unlikely(mas_ascend(mas
)))
4435 offset
= mas
->offset
;
4441 mt
= mte_node_type(mas
->node
);
4444 slots
= ma_slots(node
, mt
);
4445 mas
->node
= mas_slot(mas
, slots
, offset
);
4446 if (unlikely(ma_dead_node(node
)))
4449 mt
= mte_node_type(mas
->node
);
4451 pivots
= ma_pivots(node
, mt
);
4452 offset
= ma_data_end(node
, mt
, pivots
, max
);
4453 if (unlikely(ma_dead_node(node
)))
4457 slots
= ma_slots(node
, mt
);
4458 mas
->node
= mas_slot(mas
, slots
, offset
);
4459 pivots
= ma_pivots(node
, mt
);
4460 if (unlikely(ma_dead_node(node
)))
4464 mas
->min
= pivots
[offset
- 1] + 1;
4466 mas
->offset
= mas_data_end(mas
);
4467 if (unlikely(mte_dead_node(mas
->node
)))
4470 mas
->end
= mas
->offset
;
4474 if (unlikely(ma_dead_node(node
)))
4477 mas
->status
= ma_underflow
;
4482 * mas_prev_slot() - Get the entry in the previous slot
4484 * @mas: The maple state
4485 * @min: The minimum starting range
4486 * @empty: Can be empty
4488 * Return: The entry in the previous slot which is possibly NULL
4490 static void *mas_prev_slot(struct ma_state
*mas
, unsigned long min
, bool empty
)
4494 unsigned long pivot
;
4495 enum maple_type type
;
4496 unsigned long *pivots
;
4497 struct maple_node
*node
;
4498 unsigned long save_point
= mas
->index
;
4502 type
= mte_node_type(mas
->node
);
4503 pivots
= ma_pivots(node
, type
);
4504 if (unlikely(mas_rewalk_if_dead(mas
, node
, save_point
)))
4507 if (mas
->min
<= min
) {
4508 pivot
= mas_safe_min(mas
, pivots
, mas
->offset
);
4510 if (unlikely(mas_rewalk_if_dead(mas
, node
, save_point
)))
4518 if (likely(mas
->offset
)) {
4520 mas
->last
= mas
->index
- 1;
4521 mas
->index
= mas_safe_min(mas
, pivots
, mas
->offset
);
4523 if (mas
->index
<= min
)
4526 if (mas_prev_node(mas
, min
)) {
4527 mas_rewalk(mas
, save_point
);
4531 if (WARN_ON_ONCE(mas_is_underflow(mas
)))
4534 mas
->last
= mas
->max
;
4536 type
= mte_node_type(mas
->node
);
4537 pivots
= ma_pivots(node
, type
);
4538 mas
->index
= pivots
[mas
->offset
- 1] + 1;
4541 slots
= ma_slots(node
, type
);
4542 entry
= mas_slot(mas
, slots
, mas
->offset
);
4543 if (unlikely(mas_rewalk_if_dead(mas
, node
, save_point
)))
4551 if (mas
->index
<= min
) {
4552 mas
->status
= ma_underflow
;
4562 mas
->status
= ma_underflow
;
4567 * mas_next_node() - Get the next node at the same level in the tree.
4568 * @mas: The maple state
4569 * @node: The maple node
4570 * @max: The maximum pivot value to check.
4572 * The next value will be mas->node[mas->offset] or the status will have
4574 * Return: 1 on dead node, 0 otherwise.
4576 static int mas_next_node(struct ma_state
*mas
, struct maple_node
*node
,
4580 unsigned long *pivots
;
4581 struct maple_enode
*enode
;
4582 struct maple_node
*tmp
;
4584 unsigned char node_end
;
4588 if (mas
->max
>= max
)
4594 if (ma_is_root(node
))
4598 if (unlikely(mas_ascend(mas
)))
4603 mt
= mte_node_type(mas
->node
);
4604 pivots
= ma_pivots(node
, mt
);
4605 node_end
= ma_data_end(node
, mt
, pivots
, mas
->max
);
4606 if (unlikely(ma_dead_node(node
)))
4609 } while (unlikely(mas
->offset
== node_end
));
4611 slots
= ma_slots(node
, mt
);
4613 enode
= mas_slot(mas
, slots
, mas
->offset
);
4614 if (unlikely(ma_dead_node(node
)))
4620 while (unlikely(level
> 1)) {
4624 mt
= mte_node_type(mas
->node
);
4625 slots
= ma_slots(node
, mt
);
4626 enode
= mas_slot(mas
, slots
, 0);
4627 if (unlikely(ma_dead_node(node
)))
4632 pivots
= ma_pivots(node
, mt
);
4634 mas
->max
= mas_safe_pivot(mas
, pivots
, mas
->offset
, mt
);
4635 tmp
= mte_to_node(enode
);
4636 mt
= mte_node_type(enode
);
4637 pivots
= ma_pivots(tmp
, mt
);
4638 mas
->end
= ma_data_end(tmp
, mt
, pivots
, mas
->max
);
4639 if (unlikely(ma_dead_node(node
)))
4647 if (unlikely(ma_dead_node(node
)))
4650 mas
->status
= ma_overflow
;
4655 * mas_next_slot() - Get the entry in the next slot
4657 * @mas: The maple state
4658 * @max: The maximum starting range
4659 * @empty: Can be empty
4661 * Return: The entry in the next slot which is possibly NULL
4663 static void *mas_next_slot(struct ma_state
*mas
, unsigned long max
, bool empty
)
4666 unsigned long *pivots
;
4667 unsigned long pivot
;
4668 enum maple_type type
;
4669 struct maple_node
*node
;
4670 unsigned long save_point
= mas
->last
;
4675 type
= mte_node_type(mas
->node
);
4676 pivots
= ma_pivots(node
, type
);
4677 if (unlikely(mas_rewalk_if_dead(mas
, node
, save_point
)))
4680 if (mas
->max
>= max
) {
4681 if (likely(mas
->offset
< mas
->end
))
4682 pivot
= pivots
[mas
->offset
];
4686 if (unlikely(mas_rewalk_if_dead(mas
, node
, save_point
)))
4689 if (pivot
>= max
) { /* Was at the limit, next will extend beyond */
4690 mas
->status
= ma_overflow
;
4695 if (likely(mas
->offset
< mas
->end
)) {
4696 mas
->index
= pivots
[mas
->offset
] + 1;
4699 if (likely(mas
->offset
< mas
->end
))
4700 mas
->last
= pivots
[mas
->offset
];
4702 mas
->last
= mas
->max
;
4704 if (mas
->last
>= max
) {
4705 mas
->status
= ma_overflow
;
4709 if (mas_next_node(mas
, node
, max
)) {
4710 mas_rewalk(mas
, save_point
);
4714 if (WARN_ON_ONCE(mas_is_overflow(mas
)))
4718 mas
->index
= mas
->min
;
4720 type
= mte_node_type(mas
->node
);
4721 pivots
= ma_pivots(node
, type
);
4722 mas
->last
= pivots
[0];
4725 slots
= ma_slots(node
, type
);
4726 entry
= mt_slot(mas
->tree
, slots
, mas
->offset
);
4727 if (unlikely(mas_rewalk_if_dead(mas
, node
, save_point
)))
4735 if (mas
->last
>= max
) {
4736 mas
->status
= ma_overflow
;
4740 mas
->index
= mas
->last
+ 1;
4748 * mas_next_entry() - Internal function to get the next entry.
4749 * @mas: The maple state
4750 * @limit: The maximum range start.
4752 * Set the @mas->node to the next entry and the range_start to
4753 * the beginning value for the entry. Does not check beyond @limit.
4754 * Sets @mas->index and @mas->last to the range, Does not update @mas->index and
4755 * @mas->last on overflow.
4756 * Restarts on dead nodes.
4758 * Return: the next entry or %NULL.
4760 static inline void *mas_next_entry(struct ma_state
*mas
, unsigned long limit
)
4762 if (mas
->last
>= limit
) {
4763 mas
->status
= ma_overflow
;
4767 return mas_next_slot(mas
, limit
, false);
4771 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4772 * highest gap address of a given size in a given node and descend.
4773 * @mas: The maple state
4774 * @size: The needed size.
4776 * Return: True if found in a leaf, false otherwise.
4779 static bool mas_rev_awalk(struct ma_state
*mas
, unsigned long size
,
4780 unsigned long *gap_min
, unsigned long *gap_max
)
4782 enum maple_type type
= mte_node_type(mas
->node
);
4783 struct maple_node
*node
= mas_mn(mas
);
4784 unsigned long *pivots
, *gaps
;
4786 unsigned long gap
= 0;
4787 unsigned long max
, min
;
4788 unsigned char offset
;
4790 if (unlikely(mas_is_err(mas
)))
4793 if (ma_is_dense(type
)) {
4795 mas
->offset
= (unsigned char)(mas
->index
- mas
->min
);
4799 pivots
= ma_pivots(node
, type
);
4800 slots
= ma_slots(node
, type
);
4801 gaps
= ma_gaps(node
, type
);
4802 offset
= mas
->offset
;
4803 min
= mas_safe_min(mas
, pivots
, offset
);
4804 /* Skip out of bounds. */
4805 while (mas
->last
< min
)
4806 min
= mas_safe_min(mas
, pivots
, --offset
);
4808 max
= mas_safe_pivot(mas
, pivots
, offset
, type
);
4809 while (mas
->index
<= max
) {
4813 else if (!mas_slot(mas
, slots
, offset
))
4814 gap
= max
- min
+ 1;
4817 if ((size
<= gap
) && (size
<= mas
->last
- min
+ 1))
4821 /* Skip the next slot, it cannot be a gap. */
4826 max
= pivots
[offset
];
4827 min
= mas_safe_min(mas
, pivots
, offset
);
4837 min
= mas_safe_min(mas
, pivots
, offset
);
4840 if (unlikely((mas
->index
> max
) || (size
- 1 > max
- mas
->index
)))
4843 if (unlikely(ma_is_leaf(type
))) {
4844 mas
->offset
= offset
;
4846 *gap_max
= min
+ gap
- 1;
4850 /* descend, only happens under lock. */
4851 mas
->node
= mas_slot(mas
, slots
, offset
);
4854 mas
->offset
= mas_data_end(mas
);
4858 if (!mte_is_root(mas
->node
))
4862 mas_set_err(mas
, -EBUSY
);
4866 static inline bool mas_anode_descend(struct ma_state
*mas
, unsigned long size
)
4868 enum maple_type type
= mte_node_type(mas
->node
);
4869 unsigned long pivot
, min
, gap
= 0;
4870 unsigned char offset
, data_end
;
4871 unsigned long *gaps
, *pivots
;
4873 struct maple_node
*node
;
4876 if (ma_is_dense(type
)) {
4877 mas
->offset
= (unsigned char)(mas
->index
- mas
->min
);
4882 pivots
= ma_pivots(node
, type
);
4883 slots
= ma_slots(node
, type
);
4884 gaps
= ma_gaps(node
, type
);
4885 offset
= mas
->offset
;
4886 min
= mas_safe_min(mas
, pivots
, offset
);
4887 data_end
= ma_data_end(node
, type
, pivots
, mas
->max
);
4888 for (; offset
<= data_end
; offset
++) {
4889 pivot
= mas_safe_pivot(mas
, pivots
, offset
, type
);
4891 /* Not within lower bounds */
4892 if (mas
->index
> pivot
)
4897 else if (!mas_slot(mas
, slots
, offset
))
4898 gap
= min(pivot
, mas
->last
) - max(mas
->index
, min
) + 1;
4903 if (ma_is_leaf(type
)) {
4907 if (mas
->index
<= pivot
) {
4908 mas
->node
= mas_slot(mas
, slots
, offset
);
4917 if (mas
->last
<= pivot
) {
4918 mas_set_err(mas
, -EBUSY
);
4923 if (mte_is_root(mas
->node
))
4926 mas
->offset
= offset
;
4931 * mas_walk() - Search for @mas->index in the tree.
4932 * @mas: The maple state.
4934 * mas->index and mas->last will be set to the range if there is a value. If
4935 * mas->status is ma_none, reset to ma_start
4937 * Return: the entry at the location or %NULL.
4939 void *mas_walk(struct ma_state
*mas
)
4943 if (!mas_is_active(mas
) || !mas_is_start(mas
))
4944 mas
->status
= ma_start
;
4946 entry
= mas_state_walk(mas
);
4947 if (mas_is_start(mas
)) {
4949 } else if (mas_is_none(mas
)) {
4951 mas
->last
= ULONG_MAX
;
4952 } else if (mas_is_ptr(mas
)) {
4959 mas
->last
= ULONG_MAX
;
4960 mas
->status
= ma_none
;
4966 EXPORT_SYMBOL_GPL(mas_walk
);
4968 static inline bool mas_rewind_node(struct ma_state
*mas
)
4973 if (mte_is_root(mas
->node
)) {
4983 mas
->offset
= --slot
;
4988 * mas_skip_node() - Internal function. Skip over a node.
4989 * @mas: The maple state.
4991 * Return: true if there is another node, false otherwise.
4993 static inline bool mas_skip_node(struct ma_state
*mas
)
4995 if (mas_is_err(mas
))
4999 if (mte_is_root(mas
->node
)) {
5000 if (mas
->offset
>= mas_data_end(mas
)) {
5001 mas_set_err(mas
, -EBUSY
);
5007 } while (mas
->offset
>= mas_data_end(mas
));
5014 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of
5016 * @mas: The maple state
5017 * @size: The size of the gap required
5019 * Search between @mas->index and @mas->last for a gap of @size.
5021 static inline void mas_awalk(struct ma_state
*mas
, unsigned long size
)
5023 struct maple_enode
*last
= NULL
;
5026 * There are 4 options:
5027 * go to child (descend)
5028 * go back to parent (ascend)
5029 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5030 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5032 while (!mas_is_err(mas
) && !mas_anode_descend(mas
, size
)) {
5033 if (last
== mas
->node
)
5041 * mas_sparse_area() - Internal function. Return upper or lower limit when
5042 * searching for a gap in an empty tree.
5043 * @mas: The maple state
5044 * @min: the minimum range
5045 * @max: The maximum range
5046 * @size: The size of the gap
5047 * @fwd: Searching forward or back
5049 static inline int mas_sparse_area(struct ma_state
*mas
, unsigned long min
,
5050 unsigned long max
, unsigned long size
, bool fwd
)
5052 if (!unlikely(mas_is_none(mas
)) && min
== 0) {
5055 * At this time, min is increased, we need to recheck whether
5056 * the size is satisfied.
5058 if (min
> max
|| max
- min
+ 1 < size
)
5065 mas
->last
= min
+ size
- 1;
5068 mas
->index
= max
- size
+ 1;
5074 * mas_empty_area() - Get the lowest address within the range that is
5075 * sufficient for the size requested.
5076 * @mas: The maple state
5077 * @min: The lowest value of the range
5078 * @max: The highest value of the range
5079 * @size: The size needed
5081 int mas_empty_area(struct ma_state
*mas
, unsigned long min
,
5082 unsigned long max
, unsigned long size
)
5084 unsigned char offset
;
5085 unsigned long *pivots
;
5087 struct maple_node
*node
;
5092 if (size
== 0 || max
- min
< size
- 1)
5095 if (mas_is_start(mas
))
5097 else if (mas
->offset
>= 2)
5099 else if (!mas_skip_node(mas
))
5103 if (mas_is_none(mas
) || mas_is_ptr(mas
))
5104 return mas_sparse_area(mas
, min
, max
, size
, true);
5106 /* The start of the window can only be within these values */
5109 mas_awalk(mas
, size
);
5111 if (unlikely(mas_is_err(mas
)))
5112 return xa_err(mas
->node
);
5114 offset
= mas
->offset
;
5115 if (unlikely(offset
== MAPLE_NODE_SLOTS
))
5119 mt
= mte_node_type(mas
->node
);
5120 pivots
= ma_pivots(node
, mt
);
5121 min
= mas_safe_min(mas
, pivots
, offset
);
5122 if (mas
->index
< min
)
5124 mas
->last
= mas
->index
+ size
- 1;
5125 mas
->end
= ma_data_end(node
, mt
, pivots
, mas
->max
);
5128 EXPORT_SYMBOL_GPL(mas_empty_area
);
5131 * mas_empty_area_rev() - Get the highest address within the range that is
5132 * sufficient for the size requested.
5133 * @mas: The maple state
5134 * @min: The lowest value of the range
5135 * @max: The highest value of the range
5136 * @size: The size needed
5138 int mas_empty_area_rev(struct ma_state
*mas
, unsigned long min
,
5139 unsigned long max
, unsigned long size
)
5141 struct maple_enode
*last
= mas
->node
;
5146 if (size
== 0 || max
- min
< size
- 1)
5149 if (mas_is_start(mas
))
5151 else if ((mas
->offset
< 2) && (!mas_rewind_node(mas
)))
5154 if (unlikely(mas_is_none(mas
) || mas_is_ptr(mas
)))
5155 return mas_sparse_area(mas
, min
, max
, size
, false);
5156 else if (mas
->offset
>= 2)
5159 mas
->offset
= mas_data_end(mas
);
5162 /* The start of the window can only be within these values. */
5166 while (!mas_rev_awalk(mas
, size
, &min
, &max
)) {
5167 if (last
== mas
->node
) {
5168 if (!mas_rewind_node(mas
))
5175 if (mas_is_err(mas
))
5176 return xa_err(mas
->node
);
5178 if (unlikely(mas
->offset
== MAPLE_NODE_SLOTS
))
5181 /* Trim the upper limit to the max. */
5182 if (max
< mas
->last
)
5185 mas
->index
= mas
->last
- size
+ 1;
5186 mas
->end
= mas_data_end(mas
);
5189 EXPORT_SYMBOL_GPL(mas_empty_area_rev
);
5192 * mte_dead_leaves() - Mark all leaves of a node as dead.
5193 * @enode: the encoded node
5194 * @mt: the maple tree
5195 * @slots: Pointer to the slot array
5197 * Must hold the write lock.
5199 * Return: The number of leaves marked as dead.
5202 unsigned char mte_dead_leaves(struct maple_enode
*enode
, struct maple_tree
*mt
,
5205 struct maple_node
*node
;
5206 enum maple_type type
;
5210 for (offset
= 0; offset
< mt_slot_count(enode
); offset
++) {
5211 entry
= mt_slot(mt
, slots
, offset
);
5212 type
= mte_node_type(entry
);
5213 node
= mte_to_node(entry
);
5214 /* Use both node and type to catch LE & BE metadata */
5218 mte_set_node_dead(entry
);
5220 rcu_assign_pointer(slots
[offset
], node
);
5227 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5228 * @enode: The maple encoded node
5229 * @offset: The starting offset
5231 * Note: This can only be used from the RCU callback context.
5233 static void __rcu
**mte_dead_walk(struct maple_enode
**enode
, unsigned char offset
)
5235 struct maple_node
*node
, *next
;
5236 void __rcu
**slots
= NULL
;
5238 next
= mte_to_node(*enode
);
5240 *enode
= ma_enode_ptr(next
);
5241 node
= mte_to_node(*enode
);
5242 slots
= ma_slots(node
, node
->type
);
5243 next
= rcu_dereference_protected(slots
[offset
],
5244 lock_is_held(&rcu_callback_map
));
5246 } while (!ma_is_leaf(next
->type
));
5252 * mt_free_walk() - Walk & free a tree in the RCU callback context
5253 * @head: The RCU head that's within the node.
5255 * Note: This can only be used from the RCU callback context.
5257 static void mt_free_walk(struct rcu_head
*head
)
5260 struct maple_node
*node
, *start
;
5261 struct maple_enode
*enode
;
5262 unsigned char offset
;
5263 enum maple_type type
;
5265 node
= container_of(head
, struct maple_node
, rcu
);
5267 if (ma_is_leaf(node
->type
))
5271 enode
= mt_mk_node(node
, node
->type
);
5272 slots
= mte_dead_walk(&enode
, 0);
5273 node
= mte_to_node(enode
);
5275 mt_free_bulk(node
->slot_len
, slots
);
5276 offset
= node
->parent_slot
+ 1;
5277 enode
= node
->piv_parent
;
5278 if (mte_to_node(enode
) == node
)
5281 type
= mte_node_type(enode
);
5282 slots
= ma_slots(mte_to_node(enode
), type
);
5283 if ((offset
< mt_slots
[type
]) &&
5284 rcu_dereference_protected(slots
[offset
],
5285 lock_is_held(&rcu_callback_map
)))
5286 slots
= mte_dead_walk(&enode
, offset
);
5287 node
= mte_to_node(enode
);
5288 } while ((node
!= start
) || (node
->slot_len
< offset
));
5290 slots
= ma_slots(node
, node
->type
);
5291 mt_free_bulk(node
->slot_len
, slots
);
5294 mt_free_rcu(&node
->rcu
);
5297 static inline void __rcu
**mte_destroy_descend(struct maple_enode
**enode
,
5298 struct maple_tree
*mt
, struct maple_enode
*prev
, unsigned char offset
)
5300 struct maple_node
*node
;
5301 struct maple_enode
*next
= *enode
;
5302 void __rcu
**slots
= NULL
;
5303 enum maple_type type
;
5304 unsigned char next_offset
= 0;
5308 node
= mte_to_node(*enode
);
5309 type
= mte_node_type(*enode
);
5310 slots
= ma_slots(node
, type
);
5311 next
= mt_slot_locked(mt
, slots
, next_offset
);
5312 if ((mte_dead_node(next
)))
5313 next
= mt_slot_locked(mt
, slots
, ++next_offset
);
5315 mte_set_node_dead(*enode
);
5317 node
->piv_parent
= prev
;
5318 node
->parent_slot
= offset
;
5319 offset
= next_offset
;
5322 } while (!mte_is_leaf(next
));
5327 static void mt_destroy_walk(struct maple_enode
*enode
, struct maple_tree
*mt
,
5331 struct maple_node
*node
= mte_to_node(enode
);
5332 struct maple_enode
*start
;
5334 if (mte_is_leaf(enode
)) {
5335 node
->type
= mte_node_type(enode
);
5340 slots
= mte_destroy_descend(&enode
, mt
, start
, 0);
5341 node
= mte_to_node(enode
); // Updated in the above call.
5343 enum maple_type type
;
5344 unsigned char offset
;
5345 struct maple_enode
*parent
, *tmp
;
5347 node
->slot_len
= mte_dead_leaves(enode
, mt
, slots
);
5349 mt_free_bulk(node
->slot_len
, slots
);
5350 offset
= node
->parent_slot
+ 1;
5351 enode
= node
->piv_parent
;
5352 if (mte_to_node(enode
) == node
)
5355 type
= mte_node_type(enode
);
5356 slots
= ma_slots(mte_to_node(enode
), type
);
5357 if (offset
>= mt_slots
[type
])
5360 tmp
= mt_slot_locked(mt
, slots
, offset
);
5361 if (mte_node_type(tmp
) && mte_to_node(tmp
)) {
5364 slots
= mte_destroy_descend(&enode
, mt
, parent
, offset
);
5367 node
= mte_to_node(enode
);
5368 } while (start
!= enode
);
5370 node
= mte_to_node(enode
);
5371 node
->slot_len
= mte_dead_leaves(enode
, mt
, slots
);
5373 mt_free_bulk(node
->slot_len
, slots
);
5377 mt_free_rcu(&node
->rcu
);
5379 mt_clear_meta(mt
, node
, node
->type
);
5383 * mte_destroy_walk() - Free a tree or sub-tree.
5384 * @enode: the encoded maple node (maple_enode) to start
5385 * @mt: the tree to free - needed for node types.
5387 * Must hold the write lock.
5389 static inline void mte_destroy_walk(struct maple_enode
*enode
,
5390 struct maple_tree
*mt
)
5392 struct maple_node
*node
= mte_to_node(enode
);
5394 if (mt_in_rcu(mt
)) {
5395 mt_destroy_walk(enode
, mt
, false);
5396 call_rcu(&node
->rcu
, mt_free_walk
);
5398 mt_destroy_walk(enode
, mt
, true);
5404 * mas_store() - Store an @entry.
5405 * @mas: The maple state.
5406 * @entry: The entry to store.
5408 * The @mas->index and @mas->last is used to set the range for the @entry.
5410 * Return: the first entry between mas->index and mas->last or %NULL.
5412 void *mas_store(struct ma_state
*mas
, void *entry
)
5415 MA_WR_STATE(wr_mas
, mas
, entry
);
5417 trace_ma_write(__func__
, mas
, 0, entry
);
5418 #ifdef CONFIG_DEBUG_MAPLE_TREE
5419 if (MAS_WARN_ON(mas
, mas
->index
> mas
->last
))
5420 pr_err("Error %lX > %lX " PTR_FMT
"\n", mas
->index
, mas
->last
,
5423 if (mas
->index
> mas
->last
) {
5424 mas_set_err(mas
, -EINVAL
);
5431 * Storing is the same operation as insert with the added caveat that it
5432 * can overwrite entries. Although this seems simple enough, one may
5433 * want to examine what happens if a single store operation was to
5434 * overwrite multiple entries within a self-balancing B-Tree.
5436 mas_wr_prealloc_setup(&wr_mas
);
5437 mas
->store_type
= mas_wr_store_type(&wr_mas
);
5438 if (mas
->mas_flags
& MA_STATE_PREALLOC
) {
5439 mas_wr_store_entry(&wr_mas
);
5440 MAS_WR_BUG_ON(&wr_mas
, mas_is_err(mas
));
5441 return wr_mas
.content
;
5444 request
= mas_prealloc_calc(mas
, entry
);
5448 mas_node_count(mas
, request
);
5449 if (mas_is_err(mas
))
5453 mas_wr_store_entry(&wr_mas
);
5455 return wr_mas
.content
;
5457 EXPORT_SYMBOL_GPL(mas_store
);
5460 * mas_store_gfp() - Store a value into the tree.
5461 * @mas: The maple state
5462 * @entry: The entry to store
5463 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5465 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5468 int mas_store_gfp(struct ma_state
*mas
, void *entry
, gfp_t gfp
)
5470 unsigned long index
= mas
->index
;
5471 unsigned long last
= mas
->last
;
5472 MA_WR_STATE(wr_mas
, mas
, entry
);
5476 mas_wr_preallocate(&wr_mas
, entry
);
5477 if (unlikely(mas_nomem(mas
, gfp
))) {
5479 __mas_set_range(mas
, index
, last
);
5483 if (mas_is_err(mas
)) {
5484 ret
= xa_err(mas
->node
);
5488 mas_wr_store_entry(&wr_mas
);
5493 EXPORT_SYMBOL_GPL(mas_store_gfp
);
5496 * mas_store_prealloc() - Store a value into the tree using memory
5497 * preallocated in the maple state.
5498 * @mas: The maple state
5499 * @entry: The entry to store.
5501 void mas_store_prealloc(struct ma_state
*mas
, void *entry
)
5503 MA_WR_STATE(wr_mas
, mas
, entry
);
5505 if (mas
->store_type
== wr_store_root
) {
5506 mas_wr_prealloc_setup(&wr_mas
);
5510 mas_wr_walk_descend(&wr_mas
);
5511 if (mas
->store_type
!= wr_spanning_store
) {
5512 /* set wr_mas->content to current slot */
5513 wr_mas
.content
= mas_slot_locked(mas
, wr_mas
.slots
, mas
->offset
);
5514 mas_wr_end_piv(&wr_mas
);
5518 trace_ma_write(__func__
, mas
, 0, entry
);
5519 mas_wr_store_entry(&wr_mas
);
5520 MAS_WR_BUG_ON(&wr_mas
, mas_is_err(mas
));
5523 EXPORT_SYMBOL_GPL(mas_store_prealloc
);
5526 * mas_preallocate() - Preallocate enough nodes for a store operation
5527 * @mas: The maple state
5528 * @entry: The entry that will be stored
5529 * @gfp: The GFP_FLAGS to use for allocations.
5531 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5533 int mas_preallocate(struct ma_state
*mas
, void *entry
, gfp_t gfp
)
5535 MA_WR_STATE(wr_mas
, mas
, entry
);
5539 mas_wr_prealloc_setup(&wr_mas
);
5540 mas
->store_type
= mas_wr_store_type(&wr_mas
);
5541 request
= mas_prealloc_calc(mas
, entry
);
5545 mas_node_count_gfp(mas
, request
, gfp
);
5546 if (mas_is_err(mas
)) {
5547 mas_set_alloc_req(mas
, 0);
5548 ret
= xa_err(mas
->node
);
5554 mas
->mas_flags
|= MA_STATE_PREALLOC
;
5557 EXPORT_SYMBOL_GPL(mas_preallocate
);
5560 * mas_destroy() - destroy a maple state.
5561 * @mas: The maple state
5563 * Upon completion, check the left-most node and rebalance against the node to
5564 * the right if necessary. Frees any allocated nodes associated with this maple
5567 void mas_destroy(struct ma_state
*mas
)
5569 struct maple_alloc
*node
;
5570 unsigned long total
;
5573 * When using mas_for_each() to insert an expected number of elements,
5574 * it is possible that the number inserted is less than the expected
5575 * number. To fix an invalid final node, a check is performed here to
5576 * rebalance the previous node with the final node.
5578 if (mas
->mas_flags
& MA_STATE_REBALANCE
) {
5580 if (mas_is_err(mas
))
5583 mtree_range_walk(mas
);
5585 if (end
< mt_min_slot_count(mas
->node
) - 1)
5586 mas_destroy_rebalance(mas
, end
);
5588 mas
->mas_flags
&= ~MA_STATE_REBALANCE
;
5590 mas
->mas_flags
&= ~(MA_STATE_BULK
|MA_STATE_PREALLOC
);
5592 total
= mas_allocated(mas
);
5595 mas
->alloc
= node
->slot
[0];
5596 if (node
->node_count
> 1) {
5597 size_t count
= node
->node_count
- 1;
5599 mt_free_bulk(count
, (void __rcu
**)&node
->slot
[1]);
5602 mt_free_one(ma_mnode_ptr(node
));
5608 EXPORT_SYMBOL_GPL(mas_destroy
);
5611 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5612 * @mas: The maple state
5613 * @nr_entries: The number of expected entries.
5615 * This will attempt to pre-allocate enough nodes to store the expected number
5616 * of entries. The allocations will occur using the bulk allocator interface
5617 * for speed. Please call mas_destroy() on the @mas after inserting the entries
5618 * to ensure any unused nodes are freed.
5620 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5622 int mas_expected_entries(struct ma_state
*mas
, unsigned long nr_entries
)
5624 int nonleaf_cap
= MAPLE_ARANGE64_SLOTS
- 2;
5625 struct maple_enode
*enode
= mas
->node
;
5630 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5631 * forking a process and duplicating the VMAs from one tree to a new
5632 * tree. When such a situation arises, it is known that the new tree is
5633 * not going to be used until the entire tree is populated. For
5634 * performance reasons, it is best to use a bulk load with RCU disabled.
5635 * This allows for optimistic splitting that favours the left and reuse
5636 * of nodes during the operation.
5639 /* Optimize splitting for bulk insert in-order */
5640 mas
->mas_flags
|= MA_STATE_BULK
;
5643 * Avoid overflow, assume a gap between each entry and a trailing null.
5644 * If this is wrong, it just means allocation can happen during
5645 * insertion of entries.
5647 nr_nodes
= max(nr_entries
, nr_entries
* 2 + 1);
5648 if (!mt_is_alloc(mas
->tree
))
5649 nonleaf_cap
= MAPLE_RANGE64_SLOTS
- 2;
5651 /* Leaves; reduce slots to keep space for expansion */
5652 nr_nodes
= DIV_ROUND_UP(nr_nodes
, MAPLE_RANGE64_SLOTS
- 2);
5653 /* Internal nodes */
5654 nr_nodes
+= DIV_ROUND_UP(nr_nodes
, nonleaf_cap
);
5655 /* Add working room for split (2 nodes) + new parents */
5656 mas_node_count_gfp(mas
, nr_nodes
+ 3, GFP_KERNEL
);
5658 /* Detect if allocations run out */
5659 mas
->mas_flags
|= MA_STATE_PREALLOC
;
5661 if (!mas_is_err(mas
))
5664 ret
= xa_err(mas
->node
);
5670 EXPORT_SYMBOL_GPL(mas_expected_entries
);
5672 static bool mas_next_setup(struct ma_state
*mas
, unsigned long max
,
5675 bool was_none
= mas_is_none(mas
);
5677 if (unlikely(mas
->last
>= max
)) {
5678 mas
->status
= ma_overflow
;
5682 switch (mas
->status
) {
5688 mas
->status
= ma_start
;
5691 mas_walk(mas
); /* Retries on dead nodes handled by mas_walk */
5694 /* Overflowed before, but the max changed */
5695 mas
->status
= ma_active
;
5698 /* The user expects the mas to be one before where it is */
5699 mas
->status
= ma_active
;
5700 *entry
= mas_walk(mas
);
5710 if (likely(mas_is_active(mas
))) /* Fast path */
5713 if (mas_is_ptr(mas
)) {
5715 if (was_none
&& mas
->index
== 0) {
5716 mas
->index
= mas
->last
= 0;
5720 mas
->last
= ULONG_MAX
;
5721 mas
->status
= ma_none
;
5725 if (mas_is_none(mas
))
5732 * mas_next() - Get the next entry.
5733 * @mas: The maple state
5734 * @max: The maximum index to check.
5736 * Returns the next entry after @mas->index.
5737 * Must hold rcu_read_lock or the write lock.
5738 * Can return the zero entry.
5740 * Return: The next entry or %NULL
5742 void *mas_next(struct ma_state
*mas
, unsigned long max
)
5746 if (mas_next_setup(mas
, max
, &entry
))
5749 /* Retries on dead nodes handled by mas_next_slot */
5750 return mas_next_slot(mas
, max
, false);
5752 EXPORT_SYMBOL_GPL(mas_next
);
5755 * mas_next_range() - Advance the maple state to the next range
5756 * @mas: The maple state
5757 * @max: The maximum index to check.
5759 * Sets @mas->index and @mas->last to the range.
5760 * Must hold rcu_read_lock or the write lock.
5761 * Can return the zero entry.
5763 * Return: The next entry or %NULL
5765 void *mas_next_range(struct ma_state
*mas
, unsigned long max
)
5769 if (mas_next_setup(mas
, max
, &entry
))
5772 /* Retries on dead nodes handled by mas_next_slot */
5773 return mas_next_slot(mas
, max
, true);
5775 EXPORT_SYMBOL_GPL(mas_next_range
);
5778 * mt_next() - get the next value in the maple tree
5779 * @mt: The maple tree
5780 * @index: The start index
5781 * @max: The maximum index to check
5783 * Takes RCU read lock internally to protect the search, which does not
5784 * protect the returned pointer after dropping RCU read lock.
5785 * See also: Documentation/core-api/maple_tree.rst
5787 * Return: The entry higher than @index or %NULL if nothing is found.
5789 void *mt_next(struct maple_tree
*mt
, unsigned long index
, unsigned long max
)
5792 MA_STATE(mas
, mt
, index
, index
);
5795 entry
= mas_next(&mas
, max
);
5799 EXPORT_SYMBOL_GPL(mt_next
);
5801 static bool mas_prev_setup(struct ma_state
*mas
, unsigned long min
, void **entry
)
5803 if (unlikely(mas
->index
<= min
)) {
5804 mas
->status
= ma_underflow
;
5808 switch (mas
->status
) {
5816 mas
->status
= ma_start
;
5819 /* underflowed before but the min changed */
5820 mas
->status
= ma_active
;
5823 /* User expects mas to be one after where it is */
5824 mas
->status
= ma_active
;
5825 *entry
= mas_walk(mas
);
5835 if (mas_is_start(mas
))
5838 if (unlikely(mas_is_ptr(mas
))) {
5840 mas
->status
= ma_none
;
5843 mas
->index
= mas
->last
= 0;
5844 *entry
= mas_root(mas
);
5848 if (mas_is_none(mas
)) {
5850 /* Walked to out-of-range pointer? */
5851 mas
->index
= mas
->last
= 0;
5852 mas
->status
= ma_root
;
5853 *entry
= mas_root(mas
);
5863 * mas_prev() - Get the previous entry
5864 * @mas: The maple state
5865 * @min: The minimum value to check.
5867 * Must hold rcu_read_lock or the write lock.
5868 * Will reset mas to ma_start if the status is ma_none. Will stop on not
5871 * Return: the previous value or %NULL.
5873 void *mas_prev(struct ma_state
*mas
, unsigned long min
)
5877 if (mas_prev_setup(mas
, min
, &entry
))
5880 return mas_prev_slot(mas
, min
, false);
5882 EXPORT_SYMBOL_GPL(mas_prev
);
5885 * mas_prev_range() - Advance to the previous range
5886 * @mas: The maple state
5887 * @min: The minimum value to check.
5889 * Sets @mas->index and @mas->last to the range.
5890 * Must hold rcu_read_lock or the write lock.
5891 * Will reset mas to ma_start if the node is ma_none. Will stop on not
5894 * Return: the previous value or %NULL.
5896 void *mas_prev_range(struct ma_state
*mas
, unsigned long min
)
5900 if (mas_prev_setup(mas
, min
, &entry
))
5903 return mas_prev_slot(mas
, min
, true);
5905 EXPORT_SYMBOL_GPL(mas_prev_range
);
5908 * mt_prev() - get the previous value in the maple tree
5909 * @mt: The maple tree
5910 * @index: The start index
5911 * @min: The minimum index to check
5913 * Takes RCU read lock internally to protect the search, which does not
5914 * protect the returned pointer after dropping RCU read lock.
5915 * See also: Documentation/core-api/maple_tree.rst
5917 * Return: The entry before @index or %NULL if nothing is found.
5919 void *mt_prev(struct maple_tree
*mt
, unsigned long index
, unsigned long min
)
5922 MA_STATE(mas
, mt
, index
, index
);
5925 entry
= mas_prev(&mas
, min
);
5929 EXPORT_SYMBOL_GPL(mt_prev
);
5932 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5933 * @mas: The maple state to pause
5935 * Some users need to pause a walk and drop the lock they're holding in
5936 * order to yield to a higher priority thread or carry out an operation
5937 * on an entry. Those users should call this function before they drop
5938 * the lock. It resets the @mas to be suitable for the next iteration
5939 * of the loop after the user has reacquired the lock. If most entries
5940 * found during a walk require you to call mas_pause(), the mt_for_each()
5941 * iterator may be more appropriate.
5944 void mas_pause(struct ma_state
*mas
)
5946 mas
->status
= ma_pause
;
5949 EXPORT_SYMBOL_GPL(mas_pause
);
5952 * mas_find_setup() - Internal function to set up mas_find*().
5953 * @mas: The maple state
5954 * @max: The maximum index
5955 * @entry: Pointer to the entry
5957 * Returns: True if entry is the answer, false otherwise.
5959 static __always_inline
bool mas_find_setup(struct ma_state
*mas
, unsigned long max
, void **entry
)
5961 switch (mas
->status
) {
5963 if (mas
->last
< max
)
5969 if (unlikely(mas
->last
>= max
))
5972 mas
->index
= ++mas
->last
;
5973 mas
->status
= ma_start
;
5976 if (unlikely(mas
->last
>= max
))
5979 mas
->index
= mas
->last
;
5980 mas
->status
= ma_start
;
5983 /* mas is pointing at entry before unable to go lower */
5984 if (unlikely(mas
->index
>= max
)) {
5985 mas
->status
= ma_overflow
;
5989 mas
->status
= ma_active
;
5990 *entry
= mas_walk(mas
);
5995 if (unlikely(mas
->last
>= max
))
5998 mas
->status
= ma_active
;
5999 *entry
= mas_walk(mas
);
6009 if (mas_is_start(mas
)) {
6010 /* First run or continue */
6011 if (mas
->index
> max
)
6014 *entry
= mas_walk(mas
);
6020 if (unlikely(mas_is_ptr(mas
)))
6021 goto ptr_out_of_range
;
6023 if (unlikely(mas_is_none(mas
)))
6026 if (mas
->index
== max
)
6032 mas
->status
= ma_none
;
6034 mas
->last
= ULONG_MAX
;
6039 * mas_find() - On the first call, find the entry at or after mas->index up to
6040 * %max. Otherwise, find the entry after mas->index.
6041 * @mas: The maple state
6042 * @max: The maximum value to check.
6044 * Must hold rcu_read_lock or the write lock.
6045 * If an entry exists, last and index are updated accordingly.
6046 * May set @mas->status to ma_overflow.
6048 * Return: The entry or %NULL.
6050 void *mas_find(struct ma_state
*mas
, unsigned long max
)
6054 if (mas_find_setup(mas
, max
, &entry
))
6057 /* Retries on dead nodes handled by mas_next_slot */
6058 entry
= mas_next_slot(mas
, max
, false);
6059 /* Ignore overflow */
6060 mas
->status
= ma_active
;
6063 EXPORT_SYMBOL_GPL(mas_find
);
6066 * mas_find_range() - On the first call, find the entry at or after
6067 * mas->index up to %max. Otherwise, advance to the next slot mas->index.
6068 * @mas: The maple state
6069 * @max: The maximum value to check.
6071 * Must hold rcu_read_lock or the write lock.
6072 * If an entry exists, last and index are updated accordingly.
6073 * May set @mas->status to ma_overflow.
6075 * Return: The entry or %NULL.
6077 void *mas_find_range(struct ma_state
*mas
, unsigned long max
)
6081 if (mas_find_setup(mas
, max
, &entry
))
6084 /* Retries on dead nodes handled by mas_next_slot */
6085 return mas_next_slot(mas
, max
, true);
6087 EXPORT_SYMBOL_GPL(mas_find_range
);
6090 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6091 * @mas: The maple state
6092 * @min: The minimum index
6093 * @entry: Pointer to the entry
6095 * Returns: True if entry is the answer, false otherwise.
6097 static bool mas_find_rev_setup(struct ma_state
*mas
, unsigned long min
,
6101 switch (mas
->status
) {
6107 if (unlikely(mas
->index
<= min
)) {
6108 mas
->status
= ma_underflow
;
6111 mas
->last
= --mas
->index
;
6112 mas
->status
= ma_start
;
6115 if (mas
->index
<= min
)
6118 mas
->last
= mas
->index
;
6119 mas
->status
= ma_start
;
6121 case ma_overflow
: /* user expects the mas to be one after where it is */
6122 if (unlikely(mas
->index
<= min
)) {
6123 mas
->status
= ma_underflow
;
6127 mas
->status
= ma_active
;
6129 case ma_underflow
: /* user expects the mas to be one before where it is */
6130 if (unlikely(mas
->index
<= min
))
6133 mas
->status
= ma_active
;
6141 if (mas_is_start(mas
)) {
6142 /* First run or continue */
6143 if (mas
->index
< min
)
6146 *entry
= mas_walk(mas
);
6151 if (unlikely(mas_is_ptr(mas
)))
6154 if (unlikely(mas_is_none(mas
))) {
6156 * Walked to the location, and there was nothing so the previous
6159 mas
->last
= mas
->index
= 0;
6160 mas
->status
= ma_root
;
6161 *entry
= mas_root(mas
);
6166 if (mas
->index
< min
)
6172 mas
->status
= ma_none
;
6177 * mas_find_rev: On the first call, find the first non-null entry at or below
6178 * mas->index down to %min. Otherwise find the first non-null entry below
6179 * mas->index down to %min.
6180 * @mas: The maple state
6181 * @min: The minimum value to check.
6183 * Must hold rcu_read_lock or the write lock.
6184 * If an entry exists, last and index are updated accordingly.
6185 * May set @mas->status to ma_underflow.
6187 * Return: The entry or %NULL.
6189 void *mas_find_rev(struct ma_state
*mas
, unsigned long min
)
6193 if (mas_find_rev_setup(mas
, min
, &entry
))
6196 /* Retries on dead nodes handled by mas_prev_slot */
6197 return mas_prev_slot(mas
, min
, false);
6200 EXPORT_SYMBOL_GPL(mas_find_rev
);
6203 * mas_find_range_rev: On the first call, find the first non-null entry at or
6204 * below mas->index down to %min. Otherwise advance to the previous slot after
6205 * mas->index down to %min.
6206 * @mas: The maple state
6207 * @min: The minimum value to check.
6209 * Must hold rcu_read_lock or the write lock.
6210 * If an entry exists, last and index are updated accordingly.
6211 * May set @mas->status to ma_underflow.
6213 * Return: The entry or %NULL.
6215 void *mas_find_range_rev(struct ma_state
*mas
, unsigned long min
)
6219 if (mas_find_rev_setup(mas
, min
, &entry
))
6222 /* Retries on dead nodes handled by mas_prev_slot */
6223 return mas_prev_slot(mas
, min
, true);
6225 EXPORT_SYMBOL_GPL(mas_find_range_rev
);
6228 * mas_erase() - Find the range in which index resides and erase the entire
6230 * @mas: The maple state
6232 * Must hold the write lock.
6233 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6234 * erases that range.
6236 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6238 void *mas_erase(struct ma_state
*mas
)
6241 unsigned long index
= mas
->index
;
6242 MA_WR_STATE(wr_mas
, mas
, NULL
);
6244 if (!mas_is_active(mas
) || !mas_is_start(mas
))
6245 mas
->status
= ma_start
;
6248 entry
= mas_state_walk(mas
);
6252 /* Must reset to ensure spanning writes of last slot are detected */
6254 mas_wr_preallocate(&wr_mas
, NULL
);
6255 if (mas_nomem(mas
, GFP_KERNEL
)) {
6256 /* in case the range of entry changed when unlocked */
6257 mas
->index
= mas
->last
= index
;
6261 if (mas_is_err(mas
))
6264 mas_wr_store_entry(&wr_mas
);
6269 EXPORT_SYMBOL_GPL(mas_erase
);
6272 * mas_nomem() - Check if there was an error allocating and do the allocation
6273 * if necessary If there are allocations, then free them.
6274 * @mas: The maple state
6275 * @gfp: The GFP_FLAGS to use for allocations
6276 * Return: true on allocation, false otherwise.
6278 bool mas_nomem(struct ma_state
*mas
, gfp_t gfp
)
6279 __must_hold(mas
->tree
->ma_lock
)
6281 if (likely(mas
->node
!= MA_ERROR(-ENOMEM
)))
6284 if (gfpflags_allow_blocking(gfp
) && !mt_external_lock(mas
->tree
)) {
6285 mtree_unlock(mas
->tree
);
6286 mas_alloc_nodes(mas
, gfp
);
6287 mtree_lock(mas
->tree
);
6289 mas_alloc_nodes(mas
, gfp
);
6292 if (!mas_allocated(mas
))
6295 mas
->status
= ma_start
;
6299 void __init
maple_tree_init(void)
6301 maple_node_cache
= kmem_cache_create("maple_node",
6302 sizeof(struct maple_node
), sizeof(struct maple_node
),
6307 * mtree_load() - Load a value stored in a maple tree
6308 * @mt: The maple tree
6309 * @index: The index to load
6311 * Return: the entry or %NULL
6313 void *mtree_load(struct maple_tree
*mt
, unsigned long index
)
6315 MA_STATE(mas
, mt
, index
, index
);
6318 trace_ma_read(__func__
, &mas
);
6321 entry
= mas_start(&mas
);
6322 if (unlikely(mas_is_none(&mas
)))
6325 if (unlikely(mas_is_ptr(&mas
))) {
6332 entry
= mtree_lookup_walk(&mas
);
6333 if (!entry
&& unlikely(mas_is_start(&mas
)))
6337 if (xa_is_zero(entry
))
6342 EXPORT_SYMBOL(mtree_load
);
6345 * mtree_store_range() - Store an entry at a given range.
6346 * @mt: The maple tree
6347 * @index: The start of the range
6348 * @last: The end of the range
6349 * @entry: The entry to store
6350 * @gfp: The GFP_FLAGS to use for allocations
6352 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6355 int mtree_store_range(struct maple_tree
*mt
, unsigned long index
,
6356 unsigned long last
, void *entry
, gfp_t gfp
)
6358 MA_STATE(mas
, mt
, index
, last
);
6361 trace_ma_write(__func__
, &mas
, 0, entry
);
6362 if (WARN_ON_ONCE(xa_is_advanced(entry
)))
6369 ret
= mas_store_gfp(&mas
, entry
, gfp
);
6374 EXPORT_SYMBOL(mtree_store_range
);
6377 * mtree_store() - Store an entry at a given index.
6378 * @mt: The maple tree
6379 * @index: The index to store the value
6380 * @entry: The entry to store
6381 * @gfp: The GFP_FLAGS to use for allocations
6383 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6386 int mtree_store(struct maple_tree
*mt
, unsigned long index
, void *entry
,
6389 return mtree_store_range(mt
, index
, index
, entry
, gfp
);
6391 EXPORT_SYMBOL(mtree_store
);
6394 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6395 * @mt: The maple tree
6396 * @first: The start of the range
6397 * @last: The end of the range
6398 * @entry: The entry to store
6399 * @gfp: The GFP_FLAGS to use for allocations.
6401 * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid
6402 * request, -ENOMEM if memory could not be allocated.
6404 int mtree_insert_range(struct maple_tree
*mt
, unsigned long first
,
6405 unsigned long last
, void *entry
, gfp_t gfp
)
6407 MA_STATE(ms
, mt
, first
, last
);
6410 if (WARN_ON_ONCE(xa_is_advanced(entry
)))
6418 mas_insert(&ms
, entry
);
6419 if (mas_nomem(&ms
, gfp
))
6423 if (mas_is_err(&ms
))
6424 ret
= xa_err(ms
.node
);
6429 EXPORT_SYMBOL(mtree_insert_range
);
6432 * mtree_insert() - Insert an entry at a given index if there is no value.
6433 * @mt: The maple tree
6434 * @index : The index to store the value
6435 * @entry: The entry to store
6436 * @gfp: The GFP_FLAGS to use for allocations.
6438 * Return: 0 on success, -EEXISTS if the range is occupied, -EINVAL on invalid
6439 * request, -ENOMEM if memory could not be allocated.
6441 int mtree_insert(struct maple_tree
*mt
, unsigned long index
, void *entry
,
6444 return mtree_insert_range(mt
, index
, index
, entry
, gfp
);
6446 EXPORT_SYMBOL(mtree_insert
);
6448 int mtree_alloc_range(struct maple_tree
*mt
, unsigned long *startp
,
6449 void *entry
, unsigned long size
, unsigned long min
,
6450 unsigned long max
, gfp_t gfp
)
6454 MA_STATE(mas
, mt
, 0, 0);
6455 if (!mt_is_alloc(mt
))
6458 if (WARN_ON_ONCE(mt_is_reserved(entry
)))
6463 ret
= mas_empty_area(&mas
, min
, max
, size
);
6467 mas_insert(&mas
, entry
);
6469 * mas_nomem() may release the lock, causing the allocated area
6470 * to be unavailable, so try to allocate a free area again.
6472 if (mas_nomem(&mas
, gfp
))
6475 if (mas_is_err(&mas
))
6476 ret
= xa_err(mas
.node
);
6478 *startp
= mas
.index
;
6485 EXPORT_SYMBOL(mtree_alloc_range
);
6488 * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
6489 * @mt: The maple tree.
6490 * @startp: Pointer to ID.
6491 * @range_lo: Lower bound of range to search.
6492 * @range_hi: Upper bound of range to search.
6493 * @entry: The entry to store.
6494 * @next: Pointer to next ID to allocate.
6495 * @gfp: The GFP_FLAGS to use for allocations.
6497 * Finds an empty entry in @mt after @next, stores the new index into
6498 * the @id pointer, stores the entry at that index, then updates @next.
6500 * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
6502 * Context: Any context. Takes and releases the mt.lock. May sleep if
6503 * the @gfp flags permit.
6505 * Return: 0 if the allocation succeeded without wrapping, 1 if the
6506 * allocation succeeded after wrapping, -ENOMEM if memory could not be
6507 * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
6510 int mtree_alloc_cyclic(struct maple_tree
*mt
, unsigned long *startp
,
6511 void *entry
, unsigned long range_lo
, unsigned long range_hi
,
6512 unsigned long *next
, gfp_t gfp
)
6516 MA_STATE(mas
, mt
, 0, 0);
6518 if (!mt_is_alloc(mt
))
6520 if (WARN_ON_ONCE(mt_is_reserved(entry
)))
6523 ret
= mas_alloc_cyclic(&mas
, startp
, entry
, range_lo
, range_hi
,
6528 EXPORT_SYMBOL(mtree_alloc_cyclic
);
6530 int mtree_alloc_rrange(struct maple_tree
*mt
, unsigned long *startp
,
6531 void *entry
, unsigned long size
, unsigned long min
,
6532 unsigned long max
, gfp_t gfp
)
6536 MA_STATE(mas
, mt
, 0, 0);
6537 if (!mt_is_alloc(mt
))
6540 if (WARN_ON_ONCE(mt_is_reserved(entry
)))
6545 ret
= mas_empty_area_rev(&mas
, min
, max
, size
);
6549 mas_insert(&mas
, entry
);
6551 * mas_nomem() may release the lock, causing the allocated area
6552 * to be unavailable, so try to allocate a free area again.
6554 if (mas_nomem(&mas
, gfp
))
6557 if (mas_is_err(&mas
))
6558 ret
= xa_err(mas
.node
);
6560 *startp
= mas
.index
;
6567 EXPORT_SYMBOL(mtree_alloc_rrange
);
6570 * mtree_erase() - Find an index and erase the entire range.
6571 * @mt: The maple tree
6572 * @index: The index to erase
6574 * Erasing is the same as a walk to an entry then a store of a NULL to that
6575 * ENTIRE range. In fact, it is implemented as such using the advanced API.
6577 * Return: The entry stored at the @index or %NULL
6579 void *mtree_erase(struct maple_tree
*mt
, unsigned long index
)
6583 MA_STATE(mas
, mt
, index
, index
);
6584 trace_ma_op(__func__
, &mas
);
6587 entry
= mas_erase(&mas
);
6592 EXPORT_SYMBOL(mtree_erase
);
6595 * mas_dup_free() - Free an incomplete duplication of a tree.
6596 * @mas: The maple state of a incomplete tree.
6598 * The parameter @mas->node passed in indicates that the allocation failed on
6599 * this node. This function frees all nodes starting from @mas->node in the
6600 * reverse order of mas_dup_build(). There is no need to hold the source tree
6601 * lock at this time.
6603 static void mas_dup_free(struct ma_state
*mas
)
6605 struct maple_node
*node
;
6606 enum maple_type type
;
6608 unsigned char count
, i
;
6610 /* Maybe the first node allocation failed. */
6611 if (mas_is_none(mas
))
6614 while (!mte_is_root(mas
->node
)) {
6620 mas
->offset
= mas_data_end(mas
);
6621 } while (!mte_is_leaf(mas
->node
));
6626 node
= mte_to_node(mas
->node
);
6627 type
= mte_node_type(mas
->node
);
6628 slots
= ma_slots(node
, type
);
6629 count
= mas_data_end(mas
) + 1;
6630 for (i
= 0; i
< count
; i
++)
6631 ((unsigned long *)slots
)[i
] &= ~MAPLE_NODE_MASK
;
6632 mt_free_bulk(count
, slots
);
6635 node
= mte_to_node(mas
->node
);
6640 * mas_copy_node() - Copy a maple node and replace the parent.
6641 * @mas: The maple state of source tree.
6642 * @new_mas: The maple state of new tree.
6643 * @parent: The parent of the new node.
6645 * Copy @mas->node to @new_mas->node, set @parent to be the parent of
6646 * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
6648 static inline void mas_copy_node(struct ma_state
*mas
, struct ma_state
*new_mas
,
6649 struct maple_pnode
*parent
)
6651 struct maple_node
*node
= mte_to_node(mas
->node
);
6652 struct maple_node
*new_node
= mte_to_node(new_mas
->node
);
6655 /* Copy the node completely. */
6656 memcpy(new_node
, node
, sizeof(struct maple_node
));
6657 /* Update the parent node pointer. */
6658 val
= (unsigned long)node
->parent
& MAPLE_NODE_MASK
;
6659 new_node
->parent
= ma_parent_ptr(val
| (unsigned long)parent
);
6663 * mas_dup_alloc() - Allocate child nodes for a maple node.
6664 * @mas: The maple state of source tree.
6665 * @new_mas: The maple state of new tree.
6666 * @gfp: The GFP_FLAGS to use for allocations.
6668 * This function allocates child nodes for @new_mas->node during the duplication
6669 * process. If memory allocation fails, @mas is set to -ENOMEM.
6671 static inline void mas_dup_alloc(struct ma_state
*mas
, struct ma_state
*new_mas
,
6674 struct maple_node
*node
= mte_to_node(mas
->node
);
6675 struct maple_node
*new_node
= mte_to_node(new_mas
->node
);
6676 enum maple_type type
;
6677 unsigned char request
, count
, i
;
6679 void __rcu
**new_slots
;
6682 /* Allocate memory for child nodes. */
6683 type
= mte_node_type(mas
->node
);
6684 new_slots
= ma_slots(new_node
, type
);
6685 request
= mas_data_end(mas
) + 1;
6686 count
= mt_alloc_bulk(gfp
, request
, (void **)new_slots
);
6687 if (unlikely(count
< request
)) {
6688 memset(new_slots
, 0, request
* sizeof(void *));
6689 mas_set_err(mas
, -ENOMEM
);
6693 /* Restore node type information in slots. */
6694 slots
= ma_slots(node
, type
);
6695 for (i
= 0; i
< count
; i
++) {
6696 val
= (unsigned long)mt_slot_locked(mas
->tree
, slots
, i
);
6697 val
&= MAPLE_NODE_MASK
;
6698 ((unsigned long *)new_slots
)[i
] |= val
;
6703 * mas_dup_build() - Build a new maple tree from a source tree
6704 * @mas: The maple state of source tree, need to be in MAS_START state.
6705 * @new_mas: The maple state of new tree, need to be in MAS_START state.
6706 * @gfp: The GFP_FLAGS to use for allocations.
6708 * This function builds a new tree in DFS preorder. If the memory allocation
6709 * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
6710 * last node. mas_dup_free() will free the incomplete duplication of a tree.
6712 * Note that the attributes of the two trees need to be exactly the same, and the
6713 * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
6715 static inline void mas_dup_build(struct ma_state
*mas
, struct ma_state
*new_mas
,
6718 struct maple_node
*node
;
6719 struct maple_pnode
*parent
= NULL
;
6720 struct maple_enode
*root
;
6721 enum maple_type type
;
6723 if (unlikely(mt_attr(mas
->tree
) != mt_attr(new_mas
->tree
)) ||
6724 unlikely(!mtree_empty(new_mas
->tree
))) {
6725 mas_set_err(mas
, -EINVAL
);
6729 root
= mas_start(mas
);
6730 if (mas_is_ptr(mas
) || mas_is_none(mas
))
6733 node
= mt_alloc_one(gfp
);
6735 new_mas
->status
= ma_none
;
6736 mas_set_err(mas
, -ENOMEM
);
6740 type
= mte_node_type(mas
->node
);
6741 root
= mt_mk_node(node
, type
);
6742 new_mas
->node
= root
;
6744 new_mas
->max
= ULONG_MAX
;
6745 root
= mte_mk_root(root
);
6747 mas_copy_node(mas
, new_mas
, parent
);
6748 if (!mte_is_leaf(mas
->node
)) {
6749 /* Only allocate child nodes for non-leaf nodes. */
6750 mas_dup_alloc(mas
, new_mas
, gfp
);
6751 if (unlikely(mas_is_err(mas
)))
6755 * This is the last leaf node and duplication is
6758 if (mas
->max
== ULONG_MAX
)
6761 /* This is not the last leaf node and needs to go up. */
6764 mas_ascend(new_mas
);
6765 } while (mas
->offset
== mas_data_end(mas
));
6767 /* Move to the next subtree. */
6773 parent
= ma_parent_ptr(mte_to_node(new_mas
->node
));
6774 mas_descend(new_mas
);
6776 new_mas
->offset
= 0;
6779 /* Specially handle the parent of the root node. */
6780 mte_to_node(root
)->parent
= ma_parent_ptr(mas_tree_parent(new_mas
));
6782 /* Make them the same height */
6783 new_mas
->tree
->ma_flags
= mas
->tree
->ma_flags
;
6784 rcu_assign_pointer(new_mas
->tree
->ma_root
, root
);
6788 * __mt_dup(): Duplicate an entire maple tree
6789 * @mt: The source maple tree
6790 * @new: The new maple tree
6791 * @gfp: The GFP_FLAGS to use for allocations
6793 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6794 * traversal. It uses memcpy() to copy nodes in the source tree and allocate
6795 * new child nodes in non-leaf nodes. The new node is exactly the same as the
6796 * source node except for all the addresses stored in it. It will be faster than
6797 * traversing all elements in the source tree and inserting them one by one into
6799 * The user needs to ensure that the attributes of the source tree and the new
6800 * tree are the same, and the new tree needs to be an empty tree, otherwise
6801 * -EINVAL will be returned.
6802 * Note that the user needs to manually lock the source tree and the new tree.
6804 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If
6805 * the attributes of the two trees are different or the new tree is not an empty
6808 int __mt_dup(struct maple_tree
*mt
, struct maple_tree
*new, gfp_t gfp
)
6811 MA_STATE(mas
, mt
, 0, 0);
6812 MA_STATE(new_mas
, new, 0, 0);
6814 mas_dup_build(&mas
, &new_mas
, gfp
);
6815 if (unlikely(mas_is_err(&mas
))) {
6816 ret
= xa_err(mas
.node
);
6818 mas_dup_free(&new_mas
);
6823 EXPORT_SYMBOL(__mt_dup
);
6826 * mtree_dup(): Duplicate an entire maple tree
6827 * @mt: The source maple tree
6828 * @new: The new maple tree
6829 * @gfp: The GFP_FLAGS to use for allocations
6831 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6832 * traversal. It uses memcpy() to copy nodes in the source tree and allocate
6833 * new child nodes in non-leaf nodes. The new node is exactly the same as the
6834 * source node except for all the addresses stored in it. It will be faster than
6835 * traversing all elements in the source tree and inserting them one by one into
6837 * The user needs to ensure that the attributes of the source tree and the new
6838 * tree are the same, and the new tree needs to be an empty tree, otherwise
6839 * -EINVAL will be returned.
6841 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL If
6842 * the attributes of the two trees are different or the new tree is not an empty
6845 int mtree_dup(struct maple_tree
*mt
, struct maple_tree
*new, gfp_t gfp
)
6848 MA_STATE(mas
, mt
, 0, 0);
6849 MA_STATE(new_mas
, new, 0, 0);
6852 mas_lock_nested(&mas
, SINGLE_DEPTH_NESTING
);
6853 mas_dup_build(&mas
, &new_mas
, gfp
);
6855 if (unlikely(mas_is_err(&mas
))) {
6856 ret
= xa_err(mas
.node
);
6858 mas_dup_free(&new_mas
);
6861 mas_unlock(&new_mas
);
6864 EXPORT_SYMBOL(mtree_dup
);
6867 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6868 * @mt: The maple tree
6870 * Note: Does not handle locking.
6872 void __mt_destroy(struct maple_tree
*mt
)
6874 void *root
= mt_root_locked(mt
);
6876 rcu_assign_pointer(mt
->ma_root
, NULL
);
6877 if (xa_is_node(root
))
6878 mte_destroy_walk(root
, mt
);
6880 mt
->ma_flags
= mt_attr(mt
);
6882 EXPORT_SYMBOL_GPL(__mt_destroy
);
6885 * mtree_destroy() - Destroy a maple tree
6886 * @mt: The maple tree
6888 * Frees all resources used by the tree. Handles locking.
6890 void mtree_destroy(struct maple_tree
*mt
)
6896 EXPORT_SYMBOL(mtree_destroy
);
6899 * mt_find() - Search from the start up until an entry is found.
6900 * @mt: The maple tree
6901 * @index: Pointer which contains the start location of the search
6902 * @max: The maximum value of the search range
6904 * Takes RCU read lock internally to protect the search, which does not
6905 * protect the returned pointer after dropping RCU read lock.
6906 * See also: Documentation/core-api/maple_tree.rst
6908 * In case that an entry is found @index is updated to point to the next
6909 * possible entry independent whether the found entry is occupying a
6910 * single index or a range if indices.
6912 * Return: The entry at or after the @index or %NULL
6914 void *mt_find(struct maple_tree
*mt
, unsigned long *index
, unsigned long max
)
6916 MA_STATE(mas
, mt
, *index
, *index
);
6918 #ifdef CONFIG_DEBUG_MAPLE_TREE
6919 unsigned long copy
= *index
;
6922 trace_ma_read(__func__
, &mas
);
6929 entry
= mas_state_walk(&mas
);
6930 if (mas_is_start(&mas
))
6933 if (unlikely(xa_is_zero(entry
)))
6939 while (mas_is_active(&mas
) && (mas
.last
< max
)) {
6940 entry
= mas_next_entry(&mas
, max
);
6941 if (likely(entry
&& !xa_is_zero(entry
)))
6945 if (unlikely(xa_is_zero(entry
)))
6949 if (likely(entry
)) {
6950 *index
= mas
.last
+ 1;
6951 #ifdef CONFIG_DEBUG_MAPLE_TREE
6952 if (MT_WARN_ON(mt
, (*index
) && ((*index
) <= copy
)))
6953 pr_err("index not increased! %lx <= %lx\n",
6960 EXPORT_SYMBOL(mt_find
);
6963 * mt_find_after() - Search from the start up until an entry is found.
6964 * @mt: The maple tree
6965 * @index: Pointer which contains the start location of the search
6966 * @max: The maximum value to check
6968 * Same as mt_find() except that it checks @index for 0 before
6969 * searching. If @index == 0, the search is aborted. This covers a wrap
6970 * around of @index to 0 in an iterator loop.
6972 * Return: The entry at or after the @index or %NULL
6974 void *mt_find_after(struct maple_tree
*mt
, unsigned long *index
,
6980 return mt_find(mt
, index
, max
);
6982 EXPORT_SYMBOL(mt_find_after
);
6984 #ifdef CONFIG_DEBUG_MAPLE_TREE
6985 atomic_t maple_tree_tests_run
;
6986 EXPORT_SYMBOL_GPL(maple_tree_tests_run
);
6987 atomic_t maple_tree_tests_passed
;
6988 EXPORT_SYMBOL_GPL(maple_tree_tests_passed
);
6991 extern void kmem_cache_set_non_kernel(struct kmem_cache
*, unsigned int);
6992 void mt_set_non_kernel(unsigned int val
)
6994 kmem_cache_set_non_kernel(maple_node_cache
, val
);
6997 extern void kmem_cache_set_callback(struct kmem_cache
*cachep
,
6998 void (*callback
)(void *));
6999 void mt_set_callback(void (*callback
)(void *))
7001 kmem_cache_set_callback(maple_node_cache
, callback
);
7004 extern void kmem_cache_set_private(struct kmem_cache
*cachep
, void *private);
7005 void mt_set_private(void *private)
7007 kmem_cache_set_private(maple_node_cache
, private);
7010 extern unsigned long kmem_cache_get_alloc(struct kmem_cache
*);
7011 unsigned long mt_get_alloc_size(void)
7013 return kmem_cache_get_alloc(maple_node_cache
);
7016 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache
*);
7017 void mt_zero_nr_tallocated(void)
7019 kmem_cache_zero_nr_tallocated(maple_node_cache
);
7022 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache
*);
7023 unsigned int mt_nr_tallocated(void)
7025 return kmem_cache_nr_tallocated(maple_node_cache
);
7028 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache
*);
7029 unsigned int mt_nr_allocated(void)
7031 return kmem_cache_nr_allocated(maple_node_cache
);
7034 void mt_cache_shrink(void)
7039 * mt_cache_shrink() - For testing, don't use this.
7041 * Certain testcases can trigger an OOM when combined with other memory
7042 * debugging configuration options. This function is used to reduce the
7043 * possibility of an out of memory even due to kmem_cache objects remaining
7044 * around for longer than usual.
7046 void mt_cache_shrink(void)
7048 kmem_cache_shrink(maple_node_cache
);
7051 EXPORT_SYMBOL_GPL(mt_cache_shrink
);
7053 #endif /* not defined __KERNEL__ */
7055 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
7056 * @mas: The maple state
7057 * @offset: The offset into the slot array to fetch.
7059 * Return: The entry stored at @offset.
7061 static inline struct maple_enode
*mas_get_slot(struct ma_state
*mas
,
7062 unsigned char offset
)
7064 return mas_slot(mas
, ma_slots(mas_mn(mas
), mte_node_type(mas
->node
)),
7068 /* Depth first search, post-order */
7069 static void mas_dfs_postorder(struct ma_state
*mas
, unsigned long max
)
7072 struct maple_enode
*p
, *mn
= mas
->node
;
7073 unsigned long p_min
, p_max
;
7075 mas_next_node(mas
, mas_mn(mas
), max
);
7076 if (!mas_is_overflow(mas
))
7079 if (mte_is_root(mn
))
7088 mas_prev_node(mas
, 0);
7089 } while (!mas_is_underflow(mas
));
7096 /* Tree validations */
7097 static void mt_dump_node(const struct maple_tree
*mt
, void *entry
,
7098 unsigned long min
, unsigned long max
, unsigned int depth
,
7099 enum mt_dump_format format
);
7100 static void mt_dump_range(unsigned long min
, unsigned long max
,
7101 unsigned int depth
, enum mt_dump_format format
)
7103 static const char spaces
[] = " ";
7108 pr_info("%.*s%lx: ", depth
* 2, spaces
, min
);
7110 pr_info("%.*s%lx-%lx: ", depth
* 2, spaces
, min
, max
);
7114 pr_info("%.*s%lu: ", depth
* 2, spaces
, min
);
7116 pr_info("%.*s%lu-%lu: ", depth
* 2, spaces
, min
, max
);
7120 static void mt_dump_entry(void *entry
, unsigned long min
, unsigned long max
,
7121 unsigned int depth
, enum mt_dump_format format
)
7123 mt_dump_range(min
, max
, depth
, format
);
7125 if (xa_is_value(entry
))
7126 pr_cont("value %ld (0x%lx) [" PTR_FMT
"]\n", xa_to_value(entry
),
7127 xa_to_value(entry
), entry
);
7128 else if (xa_is_zero(entry
))
7129 pr_cont("zero (%ld)\n", xa_to_internal(entry
));
7130 else if (mt_is_reserved(entry
))
7131 pr_cont("UNKNOWN ENTRY (" PTR_FMT
")\n", entry
);
7133 pr_cont(PTR_FMT
"\n", entry
);
7136 static void mt_dump_range64(const struct maple_tree
*mt
, void *entry
,
7137 unsigned long min
, unsigned long max
, unsigned int depth
,
7138 enum mt_dump_format format
)
7140 struct maple_range_64
*node
= &mte_to_node(entry
)->mr64
;
7141 bool leaf
= mte_is_leaf(entry
);
7142 unsigned long first
= min
;
7145 pr_cont(" contents: ");
7146 for (i
= 0; i
< MAPLE_RANGE64_SLOTS
- 1; i
++) {
7149 pr_cont(PTR_FMT
" %lX ", node
->slot
[i
], node
->pivot
[i
]);
7152 pr_cont(PTR_FMT
" %lu ", node
->slot
[i
], node
->pivot
[i
]);
7155 pr_cont(PTR_FMT
"\n", node
->slot
[i
]);
7156 for (i
= 0; i
< MAPLE_RANGE64_SLOTS
; i
++) {
7157 unsigned long last
= max
;
7159 if (i
< (MAPLE_RANGE64_SLOTS
- 1))
7160 last
= node
->pivot
[i
];
7161 else if (!node
->slot
[i
] && max
!= mt_node_max(entry
))
7163 if (last
== 0 && i
> 0)
7166 mt_dump_entry(mt_slot(mt
, node
->slot
, i
),
7167 first
, last
, depth
+ 1, format
);
7168 else if (node
->slot
[i
])
7169 mt_dump_node(mt
, mt_slot(mt
, node
->slot
, i
),
7170 first
, last
, depth
+ 1, format
);
7177 pr_err("node " PTR_FMT
" last (%lx) > max (%lx) at pivot %d!\n",
7178 node
, last
, max
, i
);
7181 pr_err("node " PTR_FMT
" last (%lu) > max (%lu) at pivot %d!\n",
7182 node
, last
, max
, i
);
7189 static void mt_dump_arange64(const struct maple_tree
*mt
, void *entry
,
7190 unsigned long min
, unsigned long max
, unsigned int depth
,
7191 enum mt_dump_format format
)
7193 struct maple_arange_64
*node
= &mte_to_node(entry
)->ma64
;
7194 unsigned long first
= min
;
7197 pr_cont(" contents: ");
7198 for (i
= 0; i
< MAPLE_ARANGE64_SLOTS
; i
++) {
7201 pr_cont("%lx ", node
->gap
[i
]);
7204 pr_cont("%lu ", node
->gap
[i
]);
7207 pr_cont("| %02X %02X| ", node
->meta
.end
, node
->meta
.gap
);
7208 for (i
= 0; i
< MAPLE_ARANGE64_SLOTS
- 1; i
++) {
7211 pr_cont(PTR_FMT
" %lX ", node
->slot
[i
], node
->pivot
[i
]);
7214 pr_cont(PTR_FMT
" %lu ", node
->slot
[i
], node
->pivot
[i
]);
7217 pr_cont(PTR_FMT
"\n", node
->slot
[i
]);
7218 for (i
= 0; i
< MAPLE_ARANGE64_SLOTS
; i
++) {
7219 unsigned long last
= max
;
7221 if (i
< (MAPLE_ARANGE64_SLOTS
- 1))
7222 last
= node
->pivot
[i
];
7223 else if (!node
->slot
[i
])
7225 if (last
== 0 && i
> 0)
7228 mt_dump_node(mt
, mt_slot(mt
, node
->slot
, i
),
7229 first
, last
, depth
+ 1, format
);
7236 pr_err("node " PTR_FMT
" last (%lx) > max (%lx) at pivot %d!\n",
7237 node
, last
, max
, i
);
7240 pr_err("node " PTR_FMT
" last (%lu) > max (%lu) at pivot %d!\n",
7241 node
, last
, max
, i
);
7248 static void mt_dump_node(const struct maple_tree
*mt
, void *entry
,
7249 unsigned long min
, unsigned long max
, unsigned int depth
,
7250 enum mt_dump_format format
)
7252 struct maple_node
*node
= mte_to_node(entry
);
7253 unsigned int type
= mte_node_type(entry
);
7256 mt_dump_range(min
, max
, depth
, format
);
7258 pr_cont("node " PTR_FMT
" depth %d type %d parent " PTR_FMT
, node
,
7259 depth
, type
, node
? node
->parent
: NULL
);
7263 for (i
= 0; i
< MAPLE_NODE_SLOTS
; i
++) {
7265 pr_cont("OUT OF RANGE: ");
7266 mt_dump_entry(mt_slot(mt
, node
->slot
, i
),
7267 min
+ i
, min
+ i
, depth
, format
);
7271 case maple_range_64
:
7272 mt_dump_range64(mt
, entry
, min
, max
, depth
, format
);
7274 case maple_arange_64
:
7275 mt_dump_arange64(mt
, entry
, min
, max
, depth
, format
);
7279 pr_cont(" UNKNOWN TYPE\n");
7283 void mt_dump(const struct maple_tree
*mt
, enum mt_dump_format format
)
7285 void *entry
= rcu_dereference_check(mt
->ma_root
, mt_locked(mt
));
7287 pr_info("maple_tree(" PTR_FMT
") flags %X, height %u root " PTR_FMT
"\n",
7288 mt
, mt
->ma_flags
, mt_height(mt
), entry
);
7289 if (xa_is_node(entry
))
7290 mt_dump_node(mt
, entry
, 0, mt_node_max(entry
), 0, format
);
7292 mt_dump_entry(entry
, 0, 0, 0, format
);
7294 pr_info("(empty)\n");
7296 EXPORT_SYMBOL_GPL(mt_dump
);
7299 * Calculate the maximum gap in a node and check if that's what is reported in
7300 * the parent (unless root).
7302 static void mas_validate_gaps(struct ma_state
*mas
)
7304 struct maple_enode
*mte
= mas
->node
;
7305 struct maple_node
*p_mn
, *node
= mte_to_node(mte
);
7306 enum maple_type mt
= mte_node_type(mas
->node
);
7307 unsigned long gap
= 0, max_gap
= 0;
7308 unsigned long p_end
, p_start
= mas
->min
;
7309 unsigned char p_slot
, offset
;
7310 unsigned long *gaps
= NULL
;
7311 unsigned long *pivots
= ma_pivots(node
, mt
);
7314 if (ma_is_dense(mt
)) {
7315 for (i
= 0; i
< mt_slot_count(mte
); i
++) {
7316 if (mas_get_slot(mas
, i
)) {
7327 gaps
= ma_gaps(node
, mt
);
7328 for (i
= 0; i
< mt_slot_count(mte
); i
++) {
7329 p_end
= mas_safe_pivot(mas
, pivots
, i
, mt
);
7332 if (!mas_get_slot(mas
, i
))
7333 gap
= p_end
- p_start
+ 1;
7335 void *entry
= mas_get_slot(mas
, i
);
7338 MT_BUG_ON(mas
->tree
, !entry
);
7340 if (gap
> p_end
- p_start
+ 1) {
7341 pr_err(PTR_FMT
"[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7342 mas_mn(mas
), i
, gap
, p_end
, p_start
,
7343 p_end
- p_start
+ 1);
7344 MT_BUG_ON(mas
->tree
, gap
> p_end
- p_start
+ 1);
7351 p_start
= p_end
+ 1;
7352 if (p_end
>= mas
->max
)
7357 if (mt
== maple_arange_64
) {
7358 MT_BUG_ON(mas
->tree
, !gaps
);
7359 offset
= ma_meta_gap(node
);
7361 pr_err("gap offset " PTR_FMT
"[%u] is invalid\n", node
, offset
);
7362 MT_BUG_ON(mas
->tree
, 1);
7365 if (gaps
[offset
] != max_gap
) {
7366 pr_err("gap " PTR_FMT
"[%u] is not the largest gap %lu\n",
7367 node
, offset
, max_gap
);
7368 MT_BUG_ON(mas
->tree
, 1);
7371 for (i
++ ; i
< mt_slot_count(mte
); i
++) {
7373 pr_err("gap " PTR_FMT
"[%u] beyond node limit != 0\n",
7375 MT_BUG_ON(mas
->tree
, 1);
7380 if (mte_is_root(mte
))
7383 p_slot
= mte_parent_slot(mas
->node
);
7384 p_mn
= mte_parent(mte
);
7385 MT_BUG_ON(mas
->tree
, max_gap
> mas
->max
);
7386 if (ma_gaps(p_mn
, mas_parent_type(mas
, mte
))[p_slot
] != max_gap
) {
7387 pr_err("gap " PTR_FMT
"[%u] != %lu\n", p_mn
, p_slot
, max_gap
);
7388 mt_dump(mas
->tree
, mt_dump_hex
);
7389 MT_BUG_ON(mas
->tree
, 1);
7393 static void mas_validate_parent_slot(struct ma_state
*mas
)
7395 struct maple_node
*parent
;
7396 struct maple_enode
*node
;
7397 enum maple_type p_type
;
7398 unsigned char p_slot
;
7402 if (mte_is_root(mas
->node
))
7405 p_slot
= mte_parent_slot(mas
->node
);
7406 p_type
= mas_parent_type(mas
, mas
->node
);
7407 parent
= mte_parent(mas
->node
);
7408 slots
= ma_slots(parent
, p_type
);
7409 MT_BUG_ON(mas
->tree
, mas_mn(mas
) == parent
);
7411 /* Check prev/next parent slot for duplicate node entry */
7413 for (i
= 0; i
< mt_slots
[p_type
]; i
++) {
7414 node
= mas_slot(mas
, slots
, i
);
7416 if (node
!= mas
->node
)
7417 pr_err("parent " PTR_FMT
"[%u] does not have " PTR_FMT
"\n",
7418 parent
, i
, mas_mn(mas
));
7419 MT_BUG_ON(mas
->tree
, node
!= mas
->node
);
7420 } else if (node
== mas
->node
) {
7421 pr_err("Invalid child " PTR_FMT
" at parent " PTR_FMT
"[%u] p_slot %u\n",
7422 mas_mn(mas
), parent
, i
, p_slot
);
7423 MT_BUG_ON(mas
->tree
, node
== mas
->node
);
7428 static void mas_validate_child_slot(struct ma_state
*mas
)
7430 enum maple_type type
= mte_node_type(mas
->node
);
7431 void __rcu
**slots
= ma_slots(mte_to_node(mas
->node
), type
);
7432 unsigned long *pivots
= ma_pivots(mte_to_node(mas
->node
), type
);
7433 struct maple_enode
*child
;
7436 if (mte_is_leaf(mas
->node
))
7439 for (i
= 0; i
< mt_slots
[type
]; i
++) {
7440 child
= mas_slot(mas
, slots
, i
);
7443 pr_err("Non-leaf node lacks child at " PTR_FMT
"[%u]\n",
7445 MT_BUG_ON(mas
->tree
, 1);
7448 if (mte_parent_slot(child
) != i
) {
7449 pr_err("Slot error at " PTR_FMT
"[%u]: child " PTR_FMT
" has pslot %u\n",
7450 mas_mn(mas
), i
, mte_to_node(child
),
7451 mte_parent_slot(child
));
7452 MT_BUG_ON(mas
->tree
, 1);
7455 if (mte_parent(child
) != mte_to_node(mas
->node
)) {
7456 pr_err("child " PTR_FMT
" has parent " PTR_FMT
" not " PTR_FMT
"\n",
7457 mte_to_node(child
), mte_parent(child
),
7458 mte_to_node(mas
->node
));
7459 MT_BUG_ON(mas
->tree
, 1);
7462 if (i
< mt_pivots
[type
] && pivots
[i
] == mas
->max
)
7468 * Validate all pivots are within mas->min and mas->max, check metadata ends
7469 * where the maximum ends and ensure there is no slots or pivots set outside of
7470 * the end of the data.
7472 static void mas_validate_limits(struct ma_state
*mas
)
7475 unsigned long prev_piv
= 0;
7476 enum maple_type type
= mte_node_type(mas
->node
);
7477 void __rcu
**slots
= ma_slots(mte_to_node(mas
->node
), type
);
7478 unsigned long *pivots
= ma_pivots(mas_mn(mas
), type
);
7480 for (i
= 0; i
< mt_slots
[type
]; i
++) {
7483 piv
= mas_safe_pivot(mas
, pivots
, i
, type
);
7485 if (!piv
&& (i
!= 0)) {
7486 pr_err("Missing node limit pivot at " PTR_FMT
"[%u]",
7488 MAS_WARN_ON(mas
, 1);
7491 if (prev_piv
> piv
) {
7492 pr_err(PTR_FMT
"[%u] piv %lu < prev_piv %lu\n",
7493 mas_mn(mas
), i
, piv
, prev_piv
);
7494 MAS_WARN_ON(mas
, piv
< prev_piv
);
7497 if (piv
< mas
->min
) {
7498 pr_err(PTR_FMT
"[%u] %lu < %lu\n", mas_mn(mas
), i
,
7500 MAS_WARN_ON(mas
, piv
< mas
->min
);
7502 if (piv
> mas
->max
) {
7503 pr_err(PTR_FMT
"[%u] %lu > %lu\n", mas_mn(mas
), i
,
7505 MAS_WARN_ON(mas
, piv
> mas
->max
);
7508 if (piv
== mas
->max
)
7512 if (mas_data_end(mas
) != i
) {
7513 pr_err("node" PTR_FMT
": data_end %u != the last slot offset %u\n",
7514 mas_mn(mas
), mas_data_end(mas
), i
);
7515 MT_BUG_ON(mas
->tree
, 1);
7518 for (i
+= 1; i
< mt_slots
[type
]; i
++) {
7519 void *entry
= mas_slot(mas
, slots
, i
);
7521 if (entry
&& (i
!= mt_slots
[type
] - 1)) {
7522 pr_err(PTR_FMT
"[%u] should not have entry " PTR_FMT
"\n",
7523 mas_mn(mas
), i
, entry
);
7524 MT_BUG_ON(mas
->tree
, entry
!= NULL
);
7527 if (i
< mt_pivots
[type
]) {
7528 unsigned long piv
= pivots
[i
];
7533 pr_err(PTR_FMT
"[%u] should not have piv %lu\n",
7534 mas_mn(mas
), i
, piv
);
7535 MAS_WARN_ON(mas
, i
< mt_pivots
[type
] - 1);
7540 static void mt_validate_nulls(struct maple_tree
*mt
)
7542 void *entry
, *last
= (void *)1;
7543 unsigned char offset
= 0;
7545 MA_STATE(mas
, mt
, 0, 0);
7548 if (mas_is_none(&mas
) || (mas_is_ptr(&mas
)))
7551 while (!mte_is_leaf(mas
.node
))
7554 slots
= ma_slots(mte_to_node(mas
.node
), mte_node_type(mas
.node
));
7556 entry
= mas_slot(&mas
, slots
, offset
);
7557 if (!last
&& !entry
) {
7558 pr_err("Sequential nulls end at " PTR_FMT
"[%u]\n",
7559 mas_mn(&mas
), offset
);
7561 MT_BUG_ON(mt
, !last
&& !entry
);
7563 if (offset
== mas_data_end(&mas
)) {
7564 mas_next_node(&mas
, mas_mn(&mas
), ULONG_MAX
);
7565 if (mas_is_overflow(&mas
))
7568 slots
= ma_slots(mte_to_node(mas
.node
),
7569 mte_node_type(mas
.node
));
7574 } while (!mas_is_overflow(&mas
));
7578 * validate a maple tree by checking:
7579 * 1. The limits (pivots are within mas->min to mas->max)
7580 * 2. The gap is correctly set in the parents
7582 void mt_validate(struct maple_tree
*mt
)
7583 __must_hold(mas
->tree
->ma_lock
)
7587 MA_STATE(mas
, mt
, 0, 0);
7589 if (!mas_is_active(&mas
))
7592 while (!mte_is_leaf(mas
.node
))
7595 while (!mas_is_overflow(&mas
)) {
7596 MAS_WARN_ON(&mas
, mte_dead_node(mas
.node
));
7597 end
= mas_data_end(&mas
);
7598 if (MAS_WARN_ON(&mas
, (end
< mt_min_slot_count(mas
.node
)) &&
7599 (mas
.max
!= ULONG_MAX
))) {
7600 pr_err("Invalid size %u of " PTR_FMT
"\n",
7604 mas_validate_parent_slot(&mas
);
7605 mas_validate_limits(&mas
);
7606 mas_validate_child_slot(&mas
);
7607 if (mt_is_alloc(mt
))
7608 mas_validate_gaps(&mas
);
7609 mas_dfs_postorder(&mas
, ULONG_MAX
);
7611 mt_validate_nulls(mt
);
7613 EXPORT_SYMBOL_GPL(mt_validate
);
7615 void mas_dump(const struct ma_state
*mas
)
7617 pr_err("MAS: tree=" PTR_FMT
" enode=" PTR_FMT
" ",
7618 mas
->tree
, mas
->node
);
7619 switch (mas
->status
) {
7621 pr_err("(ma_active)");
7624 pr_err("(ma_none)");
7627 pr_err("(ma_root)");
7630 pr_err("(ma_start) ");
7633 pr_err("(ma_pause) ");
7636 pr_err("(ma_overflow) ");
7639 pr_err("(ma_underflow) ");
7642 pr_err("(ma_error) ");
7646 pr_err("Store Type: ");
7647 switch (mas
->store_type
) {
7649 pr_err("invalid store type\n");
7652 pr_err("new_root\n");
7655 pr_err("store_root\n");
7658 pr_err("exact_fit\n");
7660 case wr_split_store
:
7661 pr_err("split_store\n");
7664 pr_err("slot_store\n");
7670 pr_err("node_store\n");
7672 case wr_spanning_store
:
7673 pr_err("spanning_store\n");
7676 pr_err("rebalance\n");
7680 pr_err("[%u/%u] index=%lx last=%lx\n", mas
->offset
, mas
->end
,
7681 mas
->index
, mas
->last
);
7682 pr_err(" min=%lx max=%lx alloc=" PTR_FMT
", depth=%u, flags=%x\n",
7683 mas
->min
, mas
->max
, mas
->alloc
, mas
->depth
, mas
->mas_flags
);
7684 if (mas
->index
> mas
->last
)
7685 pr_err("Check index & last\n");
7687 EXPORT_SYMBOL_GPL(mas_dump
);
7689 void mas_wr_dump(const struct ma_wr_state
*wr_mas
)
7691 pr_err("WR_MAS: node=" PTR_FMT
" r_min=%lx r_max=%lx\n",
7692 wr_mas
->node
, wr_mas
->r_min
, wr_mas
->r_max
);
7693 pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
7694 wr_mas
->type
, wr_mas
->offset_end
, wr_mas
->mas
->end
,
7697 EXPORT_SYMBOL_GPL(mas_wr_dump
);
7699 #endif /* CONFIG_DEBUG_MAPLE_TREE */