/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of drm_mm
 * is that it's in the DRM core, which means that it's easier to extend for
 * some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
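 *
 * For example, a driver can embed the node in its own buffer object and place
 * it with the ranged insert call defined below. This is only a minimal sketch;
 * struct my_buffer and the full-range 0/U64_MAX bounds are made up for
 * illustration::
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// embedded, no extra allocation
 *	};
 *
 *	static int my_buffer_place(struct drm_mm *mm, struct my_buffer *buf,
 *				   u64 size)
 *	{
 *		return drm_mm_insert_node_in_range_generic(mm, &buf->node,
 *							   size, 0, 0,
 *							   0, U64_MAX,
 *							   DRM_MM_SEARCH_DEFAULT,
 *							   DRM_MM_CREATE_DEFAULT);
 *	}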
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
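 *
 * A minimal sketch of such a takeover, assuming the firmware scanout range has
 * already been read out into fw_start/fw_size::
 *
 *	static int takeover_fw_scanout(struct drm_mm *mm,
 *				       struct drm_mm_node *node,
 *				       u64 fw_start, u64 fw_size)
 *	{
 *		memset(node, 0, sizeof(*node));	// all other fields must be 0
 *		node->start = fw_start;
 *		node->size = fw_size;
 *		// fails with -ENOSPC if the range is no longer free
 *		return drm_mm_reserve_node(mm, node);
 *	}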
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * global GTT.
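 *
 * A color callback could, for instance, insert such guard pages by shrinking
 * the hole whenever the neighbouring nodes carry a different color. This is a
 * sketch loosely modelled on what i915 does, not a drop-in implementation::
 *
 *	static void guard_pages_adjust(const struct drm_mm_node *node,
 *				       unsigned long color,
 *				       u64 *start, u64 *end)
 *	{
 *		// the callback is invoked on the node preceding the hole
 *		if (node->allocated && node->color != color)
 *			*start += PAGE_SIZE;
 *		if (drm_mm_hole_follows(node)) {
 *			const struct drm_mm_node *next =
 *				list_next_entry(node, node_list);
 *			if (next->allocated && next->color != color)
 *				*end -= PAGE_SIZE;
 *		}
 *	}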
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
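 *
 * A driver would therefore typically wrap drm_mm in its own structure and take
 * its own lock around every call, e.g. (sketch only, my_mm is made up)::
 *
 *	struct my_mm {
 *		struct drm_mm mm;
 *		struct mutex lock;	// protects all drm_mm_* calls on mm
 *	};
 *
 *	static void my_mm_remove(struct my_mm *mymm, struct drm_mm_node *node)
 *	{
 *		mutex_lock(&mymm->lock);
 *		drm_mm_remove_node(node);
 *		mutex_unlock(&mymm->lock);
 *	}
 */
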
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);
#ifdef CONFIG_DRM_DEBUG_MM

#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}
static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ

#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }

#endif
#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, u64 alignment,
				 unsigned long color,
				 u64 range_start, u64 range_end,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, range_start);
	adj_end = min(adj_end, range_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	DRM_MM_BUG_ON(node->start < range_start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > range_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}
/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!drm_mm_hole_follows(hole))
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node,
			     size, alignment, color,
			     start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (drm_mm_hole_follows(node)) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));
	}

	if (!drm_mm_hole_follows(prev_node)) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 rem;

		div64_u64_rem(start, alignment, &rem);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}
/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select all objects from the tail of an LRU until there's
 * a suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in
 * &struct drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in scan mode no other operation is allowed.
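 *
 * A minimal sketch of such an eviction loop, assuming a driver object that
 * embeds a drm_mm_node (struct my_obj, its evict_link/lru_link members and
 * the LRU walk are made up for illustration)::
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *tmp;
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init_with_range(&scan, mm, size, alignment, color,
 *				    start, end, flags);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		// list_add() prepends, so walking evict_list forward later
 *		// visits the candidates in exactly reverse order
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);	// keep this object
 *	}
 *	if (!found)
 *		return -ENOSPC;	// nothing evicted, scan state fully unwound
 *
 *	// objects still on evict_list must now be evicted and freed, after
 *	// which the new node can be inserted into the resulting hole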
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 unsigned int flags)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->flags = flags;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);
/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->flags == DRM_MM_CREATE_TOP)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->flags != DRM_MM_CREATE_TOP)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range_generic() or one of the
 * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scan_active = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}
/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}

	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
);