/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core. Which means that it's easier to
 * extend for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway this is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the GTT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging.
 */
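
/*
 * A minimal usage sketch (not part of the original file): a hypothetical
 * driver embeds &drm_mm and &drm_mm_node in its own structures and runs the
 * allocator over a 256 MiB aperture. The names my_dev, my_buffer and
 * my_dev_alloc are made up for illustration.
 *
 *	struct my_dev {
 *		struct drm_mm mm;		// must be zeroed before drm_mm_init()
 *	};
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// embedded, no extra allocation
 *	};
 *
 *	static int my_dev_alloc(struct my_dev *dev, struct my_buffer *buf,
 *				u64 size)
 *	{
 *		// 4k-aligned, default color, bottom-up first-fit
 *		return drm_mm_insert_node_generic(&dev->mm, &buf->node,
 *						  size, 4096, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 * Setup would be drm_mm_init(&dev->mm, 0, 256 << 20); teardown is
 * drm_mm_remove_node() on each live node followed by drm_mm_takedown().
 */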

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags);

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(drm_mm_interval_first);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_next(node, start, last);
}
EXPORT_SYMBOL(drm_mm_interval_next);
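
/*
 * Illustrative sketch (hypothetical helper, not in the original file): the
 * interval tree makes "is anything allocated in [start, last]?" a cheap
 * query, e.g. before attempting to reserve a range:
 *
 *	static bool my_range_is_busy(struct drm_mm *mm, u64 start, u64 last)
 *	{
 *		return drm_mm_interval_first(mm, start, last) != NULL;
 *	}
 */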

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		/* Propagate the enlarged subtree-last value up to the root. */
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb, &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	if (WARN_ON(node->size == 0))
		return -EINVAL;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(&mm->head_node.node_list,
				  typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!hole->hole_follows)
		return -ENOSPC;

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);
	if (hole_start > node->start || hole_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

	drm_mm_interval_tree_add_node(hole, node);

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
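
/*
 * A minimal reservation sketch (illustrative; fb_base, fb_size and the
 * helper are hypothetical): taking over the firmware framebuffer so nothing
 * else gets allocated on top of the active scanout region.
 *
 *	static int my_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
 *				    u64 fb_base, u64 fb_size)
 *	{
 *		node->start = fb_base;	// caller sets start, size and color
 *		node->size = fb_size;
 *		node->color = 0;
 *		return drm_mm_reserve_node(mm, node);	// -ENOSPC if range busy
 *	}
 */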

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
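
/*
 * Sketch of the search/allocator flags (illustrative only): a best-fit,
 * top-down allocation, which can help keep fragmentation down when an area
 * has mixed placement restrictions.
 *
 *	int err = drm_mm_insert_node_generic(mm, node, size, 0, 0,
 *					     DRM_MM_SEARCH_BEST |
 *					     DRM_MM_SEARCH_BELOW,
 *					     DRM_MM_CREATE_TOP);
 *
 * DRM_MM_SEARCH_BELOW walks the hole stack in the opposite direction, which
 * pairs naturally with DRM_MM_CREATE_TOP placing nodes at the end of holes.
 */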

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	drm_mm_interval_tree_add_node(hole_node, node);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
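
/*
 * Illustrative sketch: some hardware can only scan out of the bottom 4 GiB
 * of an aperture, so a driver might constrain such buffers to that range
 * (the names and the limit here are hypothetical):
 *
 *	int err = drm_mm_insert_node_in_range_generic(mm, node, size, 4096, 0,
 *						      0, SZ_4G,
 *						      DRM_MM_SEARCH_DEFAULT,
 *						      DRM_MM_CREATE_DEFAULT);
 */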

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
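
/*
 * Illustrative sketch (hypothetical object names): swapping the backing node
 * when an object is rebuilt in place, without disturbing its offset.
 *
 *	// new_obj inherits old_obj's range; old_obj's node becomes unallocated
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);
 */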

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply pick objects off the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). The driver then adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there are no further
 * evictable objects.
 *
 * The driver must then walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which needs
 * to be walked before a scan operation even begins this is linear in the
 * number of objects. It doesn't seem to hurt badly.
 */
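
/*
 * A sketch of the full scan protocol (illustrative; my_obj, its list links,
 * my_evict() and the locking are hypothetical, and real drivers add plenty
 * of bookkeeping around this):
 *
 *	static int my_evict_for(struct drm_mm *mm, struct list_head *lru,
 *				u64 size, unsigned alignment)
 *	{
 *		struct my_obj *obj, *next;
 *		bool found = false;
 *		LIST_HEAD(scan_list);
 *		LIST_HEAD(evict_list);
 *
 *		drm_mm_init_scan(mm, size, alignment, 0);
 *
 *		// 1) Feed LRU objects into the roster until a hole appears.
 *		list_for_each_entry(obj, lru, lru_link) {
 *			list_add(&obj->scan_link, &scan_list);
 *			if (drm_mm_scan_add_block(&obj->node)) {
 *				found = true;
 *				break;
 *			}
 *		}
 *
 *		// 2) Unwind in exactly the reverse order to restore the
 *		//    allocator state (list_add() prepends, so a forward walk
 *		//    visits blocks in reverse), collecting the overlapping
 *		//    nodes along the way.
 *		list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *			if (drm_mm_scan_remove_block(&obj->node))
 *				list_move(&obj->scan_link, &evict_list);
 *			else
 *				list_del(&obj->scan_link);
 *		}
 *
 *		// 3) Only now, with the scan fully unwound, actually evict.
 *		list_for_each_entry_safe(obj, next, &evict_list, scan_link) {
 *			list_del(&obj->scan_link);
 *			my_evict(obj);	// ends with drm_mm_remove_node()
 *		}
 *
 *		return found ? 0 : -ENOSPC;
 *	}
 */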

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	/*
	 * Unlink the node so its range merges into the hole, but keep stale
	 * prev/next pointers around so drm_mm_scan_remove_block() can splice
	 * it back in.
	 */
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);
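
/*
 * Illustrative teardown sketch (reusing the hypothetical my_dev/my_buffer
 * from the overview sketch; dev->buffers and the link member are likewise
 * made up): free every live node before tearing the allocator down, since
 * takedown warns on a non-clean manager.
 *
 *	static void my_dev_fini(struct my_dev *dev)
 *	{
 *		struct my_buffer *buf, *next;
 *
 *		list_for_each_entry_safe(buf, next, &dev->buffers, link)
 *			drm_mm_remove_node(&buf->node);
 *
 *		WARN_ON(!drm_mm_clean(&dev->mm));
 *		drm_mm_takedown(&dev->mm);
 *	}
 */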

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
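
/*
 * Illustrative sketch (hypothetical wiring, reusing my_dev from above):
 * exposing the table through a driver's debugfs show callback.
 *
 *	static int my_mm_show(struct seq_file *m, void *unused)
 *	{
 *		struct my_dev *dev = m->private;
 *
 *		return drm_mm_dump_table(m, &dev->mm);
 *	}
 */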