/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
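/*
 * Editor's example (not part of the original file): a minimal sketch of the
 * allocator lifecycle described above. The function name and sizes are made
 * up; real drivers embed the node in their own buffer object and hold their
 * own lock around these calls.
 */
static int __maybe_unused drm_mm_example_basic(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};
	int ret;

	/* Manage a 16 MiB range starting at offset 0. */
	drm_mm_init(&mm, 0, 16 << 20);

	/* Grab 4 KiB anywhere; on success node.start holds the offset. */
	ret = drm_mm_insert_node(&mm, &node, 4096);
	if (ret == 0)
		drm_mm_remove_node(&node);

	/* Every node must be removed before takedown. */
	drm_mm_takedown(&mm);

	return ret;
}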
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif
#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}
#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}
static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *best = NULL;
	struct rb_node **link = &mm->holes_size.rb_node;

	while (*link) {
		struct rb_node *rb = *link;

		if (size <= rb_hole_size(rb)) {
			link = &rb->rb_left;
			best = rb;
		} else {
			link = &rb->rb_right;
		}
	}

	return rb_hole_size_to_node(best);
}
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct drm_mm_node *node = NULL;
	struct rb_node **link = &mm->holes_addr.rb_node;

	while (*link) {
		u64 hole_start;

		node = rb_hole_addr_to_node(*link);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			link = &node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			link = &node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}
static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	if (RB_EMPTY_ROOT(&mm->holes_size))
		return NULL;

	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}
/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
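/*
 * Editor's example (not part of the original file): reserving a range the
 * firmware already scans out from, as described in the kernel-doc above.
 * The offsets are made up; @node must be zeroed by the caller before
 * start/size/color are filled in.
 */
static int __maybe_unused
drm_mm_example_reserve(struct drm_mm *mm, struct drm_mm_node *node)
{
	node->start = 0x100000;	/* hypothetical firmware scanout base */
	node->size = 0x300000;	/* hypothetical framebuffer size */

	/* Fails with -ENOSPC if anything already overlaps the range. */
	return drm_mm_reserve_node(mm, node);
}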
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
	     hole = next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
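/*
 * Editor's example (not part of the original file): a 64 KiB allocation,
 * 64 KiB aligned, placed top-down inside an illustrative aperture window.
 */
static int __maybe_unused
drm_mm_example_ranged(struct drm_mm *mm, struct drm_mm_node *node)
{
	return drm_mm_insert_node_in_range(mm, node,
					   0x10000,	/* size */
					   0x10000,	/* alignment */
					   0,		/* color */
					   0x1000000,	/* range_start */
					   0x2000000,	/* range_end */
					   DRM_MM_INSERT_HIGH);
}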
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);
/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node(&old->rb_hole_size,
				&new->rb_hole_size,
				&mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A worked sketch follows this comment.
 */
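/*
 * Editor's example (not part of the original file): one way to drive the scan
 * interface from a driver LRU. struct example_bo and both list heads are
 * hypothetical; the two-pass restore mirrors the reverse-order rule above.
 */
struct example_bo {
	struct drm_mm_node node;
	struct list_head lru_link;	/* driver LRU, oldest first */
	struct list_head scan_link;	/* temporary roster membership */
};

static bool __maybe_unused
drm_mm_example_evict(struct drm_mm *mm, struct list_head *lru, u64 size)
{
	struct drm_mm_scan scan;
	struct example_bo *bo, *next;
	LIST_HEAD(roster);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);

	/* Feed candidates until a suitable hole appears. */
	list_for_each_entry(bo, lru, lru_link) {
		list_add(&bo->scan_link, &roster);
		if (drm_mm_scan_add_block(&scan, &bo->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Restore the allocator: list_add() above reversed the walk, so
	 * iterating @roster forwards is exactly the required reverse order.
	 */
	list_for_each_entry_safe(bo, next, &roster, scan_link) {
		if (drm_mm_scan_remove_block(&scan, &bo->node))
			list_move(&bo->scan_link, &evict_list);
		else
			list_del(&bo->scan_link);
	}

	/* Only now is it safe to actually evict the selected nodes. */
	list_for_each_entry_safe(bo, next, &evict_list, scan_link) {
		list_del(&bo->scan_link);
		drm_mm_remove_node(&bo->node);
	}

	/*
	 * With mm->color_adjust set, drm_mm_scan_color_evict() would now be
	 * called in a loop to evict any straddling neighbours as well.
	 */
	return found;
}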
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);
/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);
/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}
/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
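/*
 * Editor's example (not part of the original file): dumping allocator state
 * through the DRM printer abstraction, e.g. from a debugfs handler or while
 * debugging a leak reported by drm_mm_takedown().
 */
static void __maybe_unused
drm_mm_example_dump(struct drm_mm *mm, struct device *dev)
{
	struct drm_printer p = drm_info_printer(dev);

	drm_mm_print(mm, &p);
}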