// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/**
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);

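/*
 * Example: a minimal sketch of loading with the advanced API under the
 * RCU lock.  get_entry() and its arguments are hypothetical.
 *
 *	void *get_entry(struct xarray *xa, unsigned long index)
 *	{
 *		XA_STATE(xas, xa, index);
 *		void *entry;
 *
 *		rcu_read_lock();
 *		do {
 *			entry = xas_load(&xas);
 *		} while (xas_retry(&xas, entry));
 *		rcu_read_unlock();
 *		return entry;
 *	}
 */
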
/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/**
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);

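/*
 * Example: a minimal sketch of the drop-lock-and-retry loop described
 * above.  store_item() is hypothetical; real callers follow this shape.
 *
 *	int store_item(struct xarray *xa, unsigned long index, void *item)
 *	{
 *		XA_STATE(xas, xa, index);
 *
 *		do {
 *			xas_lock(&xas);
 *			xas_store(&xas, item);
 *			xas_unlock(&xas);
 *		} while (xas_nomem(&xas, GFP_KERNEL));
 *		return xas_error(&xas);
 *	}
 */
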
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask;
		mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if xa->node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/*
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns NULL.  If it failed to create the
 * slot, returns NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry)
		first = xas_create(xas);
	else
		first = xas_load(xas);

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);

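/*
 * Example: a sketch of storing the same entry at a run of consecutive
 * indices under a single lock acquisition, which is the main reason to
 * use xas_store() over xa_store().  store_run() is hypothetical and
 * omits the xas_nomem() retry a real caller would need.
 *
 *	void store_run(struct xarray *xa, unsigned long first,
 *		       unsigned long last, void *item)
 *	{
 *		XA_STATE(xas, xa, first);
 *
 *		xas_lock(&xas);
 *		while (xas.xa_index <= last && !xas_error(&xas)) {
 *			xas_store(&xas, item);
 *			xas_next(&xas);
 *		}
 *		xas_unlock(&xas);
 *	}
 */
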
/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	if (node) {
		unsigned int offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
	} else {
		xas->xa_index++;
	}
	xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);

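/*
 * Example: a sketch of the pattern described above, dropping the lock
 * mid-iteration to reschedule.  process() is hypothetical.
 *
 *	XA_STATE(xas, xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		process(entry);
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */
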
/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas_top(xas->xa_node)) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto bounds;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (!max)
		goto max;
bounds:
	xas->xa_node = XAS_BOUNDS;
	xas->xa_index = max + 1;
	return NULL;
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	unsigned int lock_type;
	static struct lock_class_key xa_lock_irq;
	static struct lock_class_key xa_lock_bh;

	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;

	lock_type = xa_lock_type(xa);
	if (lock_type == XA_LOCK_IRQ)
		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
	else if (lock_type == XA_LOCK_BH)
		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
}
EXPORT_SYMBOL(xa_init_flags);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);

static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		curr = NULL;
	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * If the entry at this index is a multi-index entry then all indices will
 * be erased, and the entry will no longer be a multi-index entry.
 * This function expects the xa_lock to be held on entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The old entry at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL_GPL(__xa_erase);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multislot entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa) && entry)
			xas_clear_mark(&xas, XA_FREE_MARK);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_store);

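/*
 * Example: typical use of the return value; 'array', 'index' and 'item'
 * are hypothetical.
 *
 *	void *old = xa_store(&array, index, item, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */
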
/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa) && entry)
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		curr = xas_load(&xas);
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(xa_cmpxchg);

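/*
 * Example: a sketch of insert-if-absent built on xa_cmpxchg(); names
 * are hypothetical.
 *
 *	void *curr = xa_cmpxchg(&array, index, NULL, item, GFP_KERNEL);
 *	if (xa_is_err(curr))
 *		return xa_err(curr);
 *	if (curr)
 *		return -EBUSY;	(somebody else stored here first)
 */
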
/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing.  If there was nothing there, the entry is marked as reserved.
 * Loads from @index will continue to see a %NULL pointer until a
 * subsequent store to @index.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Process context.  Takes and releases the xa_lock, IRQ or BH safe
 * if specified in XArray flags.  May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	unsigned int lock_type = xa_lock_type(xa);
	void *curr;

	do {
		xas_lock_type(&xas, lock_type);
		curr = xas_load(&xas);
		if (!curr)
			xas_store(&xas, XA_ZERO_ENTRY);
		xas_unlock_type(&xas, lock_type);
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(xa_reserve);

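/*
 * Example: a sketch of reserving memory ahead of a store made in a
 * context that cannot sleep; names are hypothetical.
 *
 *	if (xa_reserve(&array, index, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	xa_store(&array, index, item, GFP_NOWAIT);
 */
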
/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @max: Maximum ID to allocate (inclusive).
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @id and @max.
 * Updates the @id pointer with the index, then stores the entry at that
 * index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
 * there is no more space in the XArray.
 */
int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);
	int err;

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return -EINVAL;
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = *id;
		xas_find_marked(&xas, max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -ENOSPC);
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	err = xas_error(&xas);
	if (!err)
		*id = xas.xa_index;
	return err;
}
EXPORT_SYMBOL(__xa_alloc);

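/*
 * Example: a sketch of allocating an ID with the lock taken by the
 * caller.  Assumes the array was initialised with free tracking enabled
 * (eg XA_FLAGS_TRACK_FREE); names are hypothetical.
 *
 *	u32 id = 0;
 *	int err;
 *
 *	xa_lock(&array);
 *	err = __xa_alloc(&array, &id, UINT_MAX, item, GFP_NOWAIT);
 *	xa_unlock(&array);
 */
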
/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a NULL entry does not succeed.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL_GPL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL_GPL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns.  If you need the result to be stable, use a lock.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	entry = xas_start(&xas);
	while (xas_get_mark(&xas, mark)) {
		if (!xa_is_node(entry))
			goto found;
		entry = xas_descend(&xas, xa_to_node(entry));
	}
	rcu_read_unlock();
	return false;
 found:
	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
		if (xas.xa_shift) {
			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
				continue;
		} else {
			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
				continue;
		}
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);

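/*
 * Example: a sketch of visiting every present entry by combining
 * xa_find() and xa_find_after(), similar to what the xa_for_each()
 * helper does; process() is hypothetical.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	entry = xa_find(&array, &index, ULONG_MAX, XA_PRESENT);
 *	while (entry) {
 *		process(index, entry);
 *		entry = xa_find_after(&array, &index, ULONG_MAX, XA_PRESENT);
 *	}
 */
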
static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n)
{
	void *entry;
	unsigned int extracted = 0;

	rcu_read_lock();
	xas_for_each(xas, entry, max) {
		if (xas_retry(xas, entry))
			continue;
		dst[extracted++] = entry;
		if (extracted == n)
			break;
	}
	rcu_read_unlock();

	return extracted;
}

static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n, xa_mark_t mark)
{
	void *entry;
	unsigned int extracted = 0;

	rcu_read_lock();
	xas_for_each_marked(xas, entry, max, mark) {
		if (xas_retry(xas, entry))
			continue;
		dst[extracted++] = entry;
		if (extracted == n)
			break;
	}
	rcu_read_unlock();

	return extracted;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray.  The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied.  It may also be %XA_PRESENT, in
 * which case all entries which are not NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time.  For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10.  Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
			unsigned long max, unsigned int n, xa_mark_t filter)
{
	XA_STATE(xas, xa, start);

	if (!n)
		return 0;

	if ((__force unsigned int)filter < XA_MAX_MARKS)
		return xas_extract_marked(&xas, dst, max, n, filter);
	return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);

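/*
 * Example: a sketch of copying a batch of up to 16 present entries
 * starting from index 0; names are hypothetical.
 *
 *	void *batch[16];
 *	unsigned int copied;
 *
 *	copied = xa_extract(&array, batch, 0, ULONG_MAX, 16, XA_PRESENT);
 */
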
/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	entry = xa_head_locked(xa);
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);

#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	if ((unsigned long)node & 3) {
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}

void xa_dump_index(unsigned long index, unsigned int shift)
{
	if (!shift)
		pr_info("%lu: ", index);
	else if (shift >= BITS_PER_LONG)
		pr_info("0-%lu: ", ~0UL);
	else
		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}

void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);
			xa_dump_node(node);
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}

void xa_dump(const struct xarray *xa)
{
	void *entry = xa->xa_head;
	unsigned int shift = 0;

	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
			xa->xa_flags, xa_marked(xa, XA_MARK_0),
			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
	if (xa_is_node(entry))
		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
	xa_dump_entry(entry, 0, shift);
}
#endif