kernel/bpf/range_tree.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/interval_tree_generic.h>
#include <linux/slab.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/bpf.h>
#include "range_tree.h"

/*
 * struct range_tree is a data structure used to allocate contiguous memory
 * ranges in bpf arena. Conceptually it is a large bitmap. A contiguous
 * sequence of set bits is represented by struct range_node or 'rn' for short.
 * rn->rn_rbnode links it into an interval tree while
 * rn->rb_range_size links it into a second rbtree sorted by size of the range.
 * __find_range() does a best-fit binary search: it returns the smallest-sized
 * range that is at least as large as the requested size.
 * range_tree_clear/set() clears or sets a range of bits in this bitmap.
 * Adjacent ranges are merged or split at the same time.
 *
 * The split/merge logic is based on/borrowed from XFS's xbitmap32, added
 * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
 *
 * The implementation relies on an external lock to protect the rbtrees.
 * Allocation and freeing of range_nodes is done via bpf_mem_alloc.
 *
 * bpf arena uses range_tree to represent unallocated slots.
 * At init time:
 *   range_tree_set(rt, 0, max);
 * Then:
 *   start = range_tree_find(rt, len);
 *   if (start >= 0)
 *     range_tree_clear(rt, start, len);
 * to find a free range and mark its slots as allocated, and later:
 *   range_tree_set(rt, start, len);
 * to mark them as unallocated after use.
 */
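
/*
 * A fuller sketch of the same lifecycle, with setup, teardown and return
 * values spelled out. This is illustrative only: 'max' stands for however
 * many slots the caller manages, and the caller must provide its own
 * locking around every call.
 *
 *   struct range_tree rt;
 *   s64 start;
 *   int err;
 *
 *   range_tree_init(&rt);
 *   err = range_tree_set(&rt, 0, max);        // everything starts free
 *   if (err)
 *           return err;
 *   start = range_tree_find(&rt, len);        // best fit, -ENOENT if none
 *   if (start >= 0)
 *           range_tree_clear(&rt, start, len);   // now allocated
 *   ...
 *   range_tree_set(&rt, start, len);          // free the slots again
 *   range_tree_destroy(&rt);
 */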
struct range_node {
        struct rb_node rn_rbnode;
        struct rb_node rb_range_size;
        u32 rn_start;
        u32 rn_last; /* inclusive */
        u32 __rn_subtree_last;
};

static struct range_node *rb_to_range_node(struct rb_node *rb)
{
        return rb_entry(rb, struct range_node, rb_range_size);
}

static u32 rn_size(struct range_node *rn)
{
        return rn->rn_last - rn->rn_start + 1;
}

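/*
 * The size-sorted tree is kept in descending order (see __range_size_insert():
 * bigger ranges go to the left). A node that is large enough is remembered as
 * the current best and the walk continues to the right, toward smaller ranges,
 * to tighten the fit; a node that is too small sends the walk left, toward
 * larger ranges.
 */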
/* Find range that fits best to requested size */
static inline struct range_node *__find_range(struct range_tree *rt, u32 len)
{
        struct rb_node *rb = rt->range_size_root.rb_root.rb_node;
        struct range_node *best = NULL;

        while (rb) {
                struct range_node *rn = rb_to_range_node(rb);

                if (len <= rn_size(rn)) {
                        best = rn;
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }

        return best;
}

s64 range_tree_find(struct range_tree *rt, u32 len)
{
        struct range_node *rn;

        rn = __find_range(rt, len);
        if (!rn)
                return -ENOENT;
        return rn->rn_start;
}

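/*
 * Note that the order is descending: a bigger range goes to the left, which
 * is what the best-fit walk in __find_range() relies on.
 */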
/* Insert the range into rbtree sorted by the range size */
static inline void __range_size_insert(struct range_node *rn,
                                       struct rb_root_cached *root)
{
        struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
        u64 size = rn_size(rn);
        bool leftmost = true;

        while (*link) {
                rb = *link;
                if (size > rn_size(rb_to_range_node(rb))) {
                        link = &rb->rb_left;
                } else {
                        link = &rb->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&rn->rb_range_size, rb, link);
        rb_insert_color_cached(&rn->rb_range_size, root, leftmost);
}

#define START(node) ((node)->rn_start)
#define LAST(node)  ((node)->rn_last)

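/*
 * Instantiate the generic interval tree for struct range_node: this expands
 * to __range_it_insert(), __range_it_remove(), __range_it_iter_first() (and
 * __range_it_iter_next()), keyed by the inclusive [rn_start, rn_last] range
 * and threaded through rn_rbnode/__rn_subtree_last.
 */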
INTERVAL_TREE_DEFINE(struct range_node, rn_rbnode, u32,
                     __rn_subtree_last, START, LAST,
                     static inline __maybe_unused,
                     __range_it)

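/*
 * Every range_node lives in both trees at once: the interval tree for
 * overlap queries and the size tree for best-fit lookups. The helpers below
 * always update the two together.
 */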
static inline __maybe_unused void
range_it_insert(struct range_node *rn, struct range_tree *rt)
{
        __range_size_insert(rn, &rt->range_size_root);
        __range_it_insert(rn, &rt->it_root);
}

static inline __maybe_unused void
range_it_remove(struct range_node *rn, struct range_tree *rt)
{
        rb_erase_cached(&rn->rb_range_size, &rt->range_size_root);
        RB_CLEAR_NODE(&rn->rb_range_size);
        __range_it_remove(rn, &rt->it_root);
}

static inline __maybe_unused struct range_node *
range_it_iter_first(struct range_tree *rt, u32 start, u32 last)
{
        return __range_it_iter_first(&rt->it_root, start, last);
}

/* Clear the range in this range tree */
int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
{
        u32 last = start + len - 1;
        struct range_node *new_rn;
        struct range_node *rn;

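        /*
         * Visit every range overlapping [start, last] and trim, split or
         * free it so that nothing inside the window stays set. Only the
         * case where one range covers the whole window needs a second node.
         */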
        while ((rn = range_it_iter_first(rt, start, last))) {
                if (rn->rn_start < start && rn->rn_last > last) {
                        u32 old_last = rn->rn_last;

                        /* Overlaps with the entire clearing range */
                        range_it_remove(rn, rt);
                        rn->rn_last = start - 1;
                        range_it_insert(rn, rt);

                        /* Add a range */
                        migrate_disable();
                        new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
                        migrate_enable();
                        if (!new_rn)
                                return -ENOMEM;
                        new_rn->rn_start = last + 1;
                        new_rn->rn_last = old_last;
                        range_it_insert(new_rn, rt);
                } else if (rn->rn_start < start) {
                        /* Overlaps with the left side of the clearing range */
                        range_it_remove(rn, rt);
                        rn->rn_last = start - 1;
                        range_it_insert(rn, rt);
                } else if (rn->rn_last > last) {
                        /* Overlaps with the right side of the clearing range */
                        range_it_remove(rn, rt);
                        rn->rn_start = last + 1;
                        range_it_insert(rn, rt);
                        break;
                } else {
                        /* in the middle of the clearing range */
                        range_it_remove(rn, rt);
                        migrate_disable();
                        bpf_mem_free(&bpf_global_ma, rn);
                        migrate_enable();
                }
        }
        return 0;
}

/* Is the whole range set ? */
int is_range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
        u32 last = start + len - 1;
        struct range_node *left;

        /* Is this whole range set ? */
        left = range_it_iter_first(rt, start, last);
        if (left && left->rn_start <= start && left->rn_last >= last)
                return 0;
        return -ESRCH;
}

/* Set the range in this range tree */
int range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
        u32 last = start + len - 1;
        struct range_node *right;
        struct range_node *left;
        int err;

        /* Is this whole range already set ? */
        left = range_it_iter_first(rt, start, last);
        if (left && left->rn_start <= start && left->rn_last >= last)
                return 0;

        /* Clear out everything in the range we want to set. */
        err = range_tree_clear(rt, start, len);
        if (err)
                return err;

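        /*
         * After the clear above nothing overlaps [start, last] anymore, so a
         * node found at start - 1 or last + 1 must end, respectively begin,
         * exactly at the boundary; anything else means the tree is corrupted
         * and is reported as -EFAULT below.
         */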
        /* Do we have a left-adjacent range ? */
        left = range_it_iter_first(rt, start - 1, start - 1);
        if (left && left->rn_last + 1 != start)
                return -EFAULT;

        /* Do we have a right-adjacent range ? */
        right = range_it_iter_first(rt, last + 1, last + 1);
        if (right && right->rn_start != last + 1)
                return -EFAULT;

        if (left && right) {
                /* Combine left and right adjacent ranges */
                range_it_remove(left, rt);
                range_it_remove(right, rt);
                left->rn_last = right->rn_last;
                range_it_insert(left, rt);
                migrate_disable();
                bpf_mem_free(&bpf_global_ma, right);
                migrate_enable();
        } else if (left) {
                /* Combine with the left range */
                range_it_remove(left, rt);
                left->rn_last = last;
                range_it_insert(left, rt);
        } else if (right) {
                /* Combine with the right range */
                range_it_remove(right, rt);
                right->rn_start = start;
                range_it_insert(right, rt);
        } else {
                migrate_disable();
                left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
                migrate_enable();
                if (!left)
                        return -ENOMEM;
                left->rn_start = start;
                left->rn_last = last;
                range_it_insert(left, rt);
        }
        return 0;
}

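/* Free every node still in the tree; [0, -1U] spans the whole u32 index space */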
void range_tree_destroy(struct range_tree *rt)
{
        struct range_node *rn;

        while ((rn = range_it_iter_first(rt, 0, -1U))) {
                range_it_remove(rn, rt);
                migrate_disable();
                bpf_mem_free(&bpf_global_ma, rn);
                migrate_enable();
        }
}

void range_tree_init(struct range_tree *rt)
{
        rt->it_root = RB_ROOT_CACHED;
        rt->range_size_root = RB_ROOT_CACHED;
}