/* drivers/gpu/drm/drm_mm.c */
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
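
/*
 * drm_mm_kmalloc() below first tries the slab; when that fails it falls back
 * to the unused_nodes cache that drm_mm_pre_get() fills. This fallback is
 * what keeps the atomic variants of the get_block API usable even under
 * memory pressure: callers stock the cache while sleeping is still allowed,
 * then allocate under their own lock.
 */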
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre allocate drm_mm_node structure
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
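
/*
 * Illustrative usage sketch, not part of the original file (dev_priv and its
 * lock are hypothetical): pre-fill the node cache while sleeping is allowed,
 * then allocate from atomic context.
 *
 *	drm_mm_pre_get(&dev_priv->mm);          may sleep, tops up the cache
 *	spin_lock(&dev_priv->mm_lock);
 *	node = drm_mm_get_block_atomic(hole_node, size, alignment);
 *	spin_unlock(&dev_priv->mm_lock);
 *
 * drm_mm_get_block_atomic() is the GFP_ATOMIC wrapper from drm_mm.h around
 * drm_mm_get_block_generic() defined below.
 */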
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}
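
/*
 * Bookkeeping note for the helpers above: nodes are kept on node_list in
 * ascending address order, so the hole following a node runs from the end of
 * that node to the start of its successor. Only nodes with hole_follows set
 * are also linked into mm->hole_stack, which is the list the free-space
 * searches below walk.
 */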
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
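
/*
 * Illustrative sketch, not from the original file (my_buffer and buf are
 * hypothetical): with the insert_node API the caller embeds and owns the
 * node, so no allocation happens inside the manager.
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	must be zeroed before insert
 *	};
 *
 *	ret = drm_mm_insert_node(&mm, &buf->node, size, alignment);
 *	if (ret)
 *		return ret;			-ENOSPC when nothing fits
 *	...
 *	drm_mm_remove_node(&buf->node);		frees the range, not the node
 */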
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
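
/*
 * Illustrative sketch, not from the original file: the range-restricted
 * variant confines the allocation, e.g. to the first 64 KiB of the managed
 * area for hardware that can only address low offsets.
 *
 *	ret = drm_mm_insert_node_in_range(&mm, &buf->node, size, alignment,
 *					  0, 64 * 1024);
 */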
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
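
/*
 * The allocator thus exposes two flavours: drm_mm_get_block()/drm_mm_put_block()
 * let the manager allocate and recycle struct drm_mm_node itself (via the
 * unused_nodes cache), while drm_mm_insert_node()/drm_mm_remove_node() work on
 * caller-provided, typically embedded nodes and never allocate memory.
 */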
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted) {
		return 1;
	}

	return 0;
}
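
/*
 * Worked example for the alignment math above (values picked for
 * illustration): start = 0x1003, end = 0x3000, size = 0x1000 and
 * alignment = 0x1000 give tmp = 3 and wasted = 0xffd; the hole qualifies
 * because 0x3000 >= 0x1003 + 0x1000 + 0xffd = 0x3000.
 */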
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
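
/*
 * With best_match = 0 the search above returns the first hole that fits,
 * which is cheap; with best_match = 1 it walks the entire hole_stack and
 * returns the smallest fitting hole, trading scan time for reduced
 * fragmentation.
 */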
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
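
/*
 * Illustrative sketch, not from the original file (old_buf and new_buf are
 * hypothetical): drm_mm_replace_node() hands the allocated range over to a
 * new embedded node without a remove + insert cycle, so the offset is kept:
 *
 *	drm_mm_replace_node(&old_buf->node, &new_buf->node);
 *	old_buf->node is now unallocated; new_buf->node owns the range.
 */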
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
			mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
			mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		/* Record the complete resulting hole; scan_hit_size must be a
		 * size, since drm_mm_scan_remove_block() adds it to
		 * scan_hit_start for its containment check. */
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end - hole_start;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start & size of the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
	    		<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
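
/*
 * Illustrative eviction-scan sketch, not from the original file (obj, lru and
 * evict() are hypothetical). The scan API is meant for an LRU walk: add
 * candidates until a hole is reported, then remove every scanned block in
 * reverse add order and evict only those flagged by drm_mm_scan_remove_block():
 *
 *	drm_mm_init_scan(&mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link)
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;
 *
 *	then, walking the same objects in reverse add order:
 *	if (drm_mm_scan_remove_block(&obj->node))
 *		evict(obj);
 *
 * Every block added must be removed again, even when no hole was found.
 */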
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
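
/*
 * Worked example of the head_node trick above: drm_mm_init(mm, 0, 0x10000)
 * sets head_node.start = 0x10000 and head_node.size = -0x10000 (unsigned
 * wrap-around), so drm_mm_hole_node_start(&mm->head_node) = 0x10000 - 0x10000
 * = 0, while drm_mm_hole_node_end() follows the still-circular node_list back
 * to head_node.start = 0x10000. The sentinel's "hole" is therefore exactly
 * the whole managed range, and no empty-manager special case is needed.
 */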
void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif