// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/slab.h>

#include "i915_buddy.h"

#include "i915_gem.h"
#include "i915_globals.h"
#include "i915_utils.h"

static struct i915_global_block {
	struct i915_global base;
	struct kmem_cache *slab_blocks;
} global;

static void i915_global_buddy_shrink(void)
{
	kmem_cache_shrink(global.slab_blocks);
}

static void i915_global_buddy_exit(void)
{
	kmem_cache_destroy(global.slab_blocks);
}

static struct i915_global_block global = { {
	.shrink = i915_global_buddy_shrink,
	.exit = i915_global_buddy_exit,
} };

int __init i915_global_buddy_init(void)
{
	global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
	if (!global.slab_blocks)
		return -ENOMEM;

	i915_global_register(&global.base);

	return 0;
}

static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
						 unsigned int order,
						 u64 offset)
{
	struct i915_buddy_block *block;

	block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	return block;
}

static void i915_block_free(struct i915_buddy_block *block)
{
	kmem_cache_free(global.slab_blocks, block);
}

static void mark_allocated(struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct i915_buddy_mm *mm,
		      struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_FREE;

	list_add(&block->link,
		 &mm->free_list[i915_buddy_block_order(block)]);
}

static void mark_split(struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_SPLIT;

	list_del(&block->link);
}

int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct i915_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct i915_buddy_block *root;
		unsigned int order;
		u64 root_size;

		root_size = rounddown_pow_of_two(size);
		order = ilog2(root_size) - ilog2(chunk_size);

		root = i915_block_alloc(NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		GEM_BUG_ON(i > mm->max_order);
		GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		i915_block_free(mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}

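/*
 * Example (editor's sketch, not part of the original file): initialising
 * the allocator with a size that is not a power of two. With size =
 * SZ_256M + SZ_64M and chunk_size = SZ_4K, hweight64(size) == 2, so the
 * loop above creates two roots: a 256M block at offset 0 and a 64M block
 * at offset 256M. The function name is hypothetical; the SZ_* macros come
 * from <linux/sizes.h>.
 */
static int __maybe_unused example_buddy_init(struct i915_buddy_mm *mm)
{
	return i915_buddy_init(mm, SZ_256M + SZ_64M, SZ_4K);
}
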
void i915_buddy_fini(struct i915_buddy_mm *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
		i915_block_free(mm->roots[i]);
	}

	kfree(mm->roots);
	kfree(mm->free_list);
}

static int split_block(struct i915_buddy_mm *mm,
		       struct i915_buddy_block *block)
{
	unsigned int block_order = i915_buddy_block_order(block) - 1;
	u64 offset = i915_buddy_block_offset(block);

	GEM_BUG_ON(!i915_buddy_block_is_free(block));
	GEM_BUG_ON(!i915_buddy_block_order(block));

	block->left = i915_block_alloc(block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = i915_block_alloc(block, block_order,
					offset + (mm->chunk_size << block_order));
	if (!block->right) {
		i915_block_free(block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}

static struct i915_buddy_block *
get_buddy(struct i915_buddy_block *block)
{
	struct i915_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

static void __i915_buddy_free(struct i915_buddy_mm *mm,
			      struct i915_buddy_block *block)
{
	struct i915_buddy_block *parent;

	while ((parent = block->parent)) {
		struct i915_buddy_block *buddy;

		buddy = get_buddy(block);

		if (!i915_buddy_block_is_free(buddy))
			break;

		list_del(&buddy->link);

		i915_block_free(block);
		i915_block_free(buddy);

		block = parent;
	}

	mark_free(mm, block);
}

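/*
 * Editor's note (not in the original): the loop above is the buddy merge
 * step. Freeing a block walks up the tree, coalescing with the sibling
 * whenever both halves are free, so the largest possible block is returned
 * to the free lists.
 */
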
void i915_buddy_free(struct i915_buddy_mm *mm,
		     struct i915_buddy_block *block)
{
	GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
	__i915_buddy_free(mm, block);
}

void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
	struct i915_buddy_block *block, *on;

	list_for_each_entry_safe(block, on, objects, link) {
		i915_buddy_free(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

/*
 * Allocate power-of-two block. The order value here translates to:
 *
 * 0 = 2^0 * mm->chunk_size
 * 1 = 2^1 * mm->chunk_size
 * 2 = 2^2 * mm->chunk_size
 */
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
{
	struct i915_buddy_block *block = NULL;
	unsigned int i;
	int err;

	for (i = order; i <= mm->max_order; ++i) {
		block = list_first_entry_or_null(&mm->free_list[i],
						 struct i915_buddy_block,
						 link);
		if (block)
			break;
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	GEM_BUG_ON(!i915_buddy_block_is_free(block));

	while (i != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto out_free;

		/* Go low */
		block = block->left;
		i--;
	}

	mark_allocated(block);
	kmemleak_update_trace(block);
	return block;

out_free:
	if (i != order)
		__i915_buddy_free(mm, block);
	return ERR_PTR(err);
}

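/*
 * Example (editor's sketch, not part of the original file): allocating one
 * 64K block from a manager created with 4K chunks. The order is the
 * power-of-two exponent relative to chunk_size: 64K / 4K == 2^4, so order
 * is 4. The function name is hypothetical.
 */
static __maybe_unused struct i915_buddy_block *
example_buddy_alloc_64k(struct i915_buddy_mm *mm)
{
	unsigned int order = ilog2(SZ_64K) - ilog2(mm->chunk_size);

	return i915_buddy_alloc(mm, order);
}
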
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

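/*
 * Editor's note (not in the original): both helpers treat e1/e2 as
 * inclusive end offsets (the last byte of each range), matching the
 * block_end computation in i915_buddy_alloc_range() below.
 */
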
/*
 * Allocate range. Note that it's safe to chain together multiple alloc_ranges
 * with the same blocks list.
 *
 * Intended for pre-allocating portions of the address space, for example to
 * reserve a block for the initial framebuffer or similar. The expectation
 * here is that i915_buddy_alloc() is still the main vehicle for allocations;
 * if that's not the case then the drm_mm range allocator is probably a much
 * better fit.
 */
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
			   struct list_head *blocks,
			   u64 start, u64 size)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *buddy;
	LIST_HEAD(allocated);
	LIST_HEAD(dfs);
	u64 end;
	int err;
	int i;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (!IS_ALIGNED(size | start, mm->chunk_size))
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct i915_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = i915_buddy_block_offset(block);
		block_end = block_start + i915_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (i915_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (!i915_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		if (!i915_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = get_buddy(block);
	if (buddy &&
	    (i915_buddy_block_is_free(block) &&
	     i915_buddy_block_is_free(buddy)))
		__i915_buddy_free(mm, block);

err_free:
	i915_buddy_free_list(mm, &allocated);
	return err;
}

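/*
 * Example (editor's sketch, not part of the original file): reserving a
 * 16M region at a fixed offset, e.g. for a firmware-initialised
 * framebuffer. Both start and size must be multiples of mm->chunk_size.
 * The function name and offset are hypothetical; the SZ_* macros come from
 * <linux/sizes.h>.
 */
static int __maybe_unused
example_buddy_reserve(struct i915_buddy_mm *mm, struct list_head *reserved)
{
	return i915_buddy_alloc_range(mm, reserved, SZ_1G, SZ_16M);
}
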
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif