/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Allocation failed: fall back to a node from the
		 * preallocated cache filled by drm_mm_pre_get(). */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/*
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
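
/*
 * Example (illustrative sketch, not part of the original file): the intended
 * pattern is to top up the node cache with drm_mm_pre_get() in a sleepable
 * context, then allocate atomically under the driver's own lock. The
 * dev_priv structure and mm_lock below are hypothetical driver state.
 *
 *	ret = drm_mm_pre_get(&dev_priv->mm);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	hole = drm_mm_search_free(&dev_priv->mm, size, alignment, 0);
 *	node = hole ? drm_mm_get_block_generic(hole, size, alignment, 1) : NULL;
 *	spin_unlock(&dev_priv->mm_lock);
 *	if (!node)
 *		return -ENOSPC;
 */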
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (alignment)
		tmp = hole_start % alignment;

	if (!tmp) {
		/* The new node fills the hole from its very start, so no
		 * hole follows hole_node any more. */
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
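
/*
 * Example (illustrative sketch, not part of the original file): using
 * drm_mm_insert_node() with a node embedded in a driver object, which avoids
 * a separate drm_mm_node allocation entirely. struct foo_buffer is
 * hypothetical; the embedded node must be zeroed before insertion.
 *
 *	struct foo_buffer {
 *		struct drm_mm_node node;	// kzalloc'ed with the object
 *		void *data;
 *	};
 *
 *	int foo_buffer_bind(struct drm_mm *mm, struct foo_buffer *buf,
 *			    unsigned long size)
 *	{
 *		return drm_mm_insert_node(mm, &buf->node, size, 0);
 *	}
 *
 * The matching teardown is drm_mm_remove_node(&buf->node), not
 * drm_mm_put_block(), since the manager does not own the node's storage.
 */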
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > hole_end);
	BUG_ON(node->start + node->size > end);

	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
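
/*
 * Example (illustrative sketch, not part of the original file): a
 * range-restricted insertion, e.g. to keep a buffer inside the low part of
 * an aperture that the hardware can address. The 256 MiB bound is made up.
 *
 *	ret = drm_mm_insert_node_in_range(mm, &buf->node, size, PAGE_SIZE,
 *					  0, 256 * 1024 * 1024);
 *	if (ret == -ENOSPC)
 *		... // evict something inside the range and retry
 */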
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	/* The freed range becomes (part of) the hole following prev_node. */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
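
/*
 * Example (illustrative sketch, not part of the original file): the classic
 * get_block/put_block pairing, where the manager owns the drm_mm_node
 * allocation and recycles it through the unused_nodes cache.
 *
 *	hole = drm_mm_search_free(mm, size, alignment, 1);	// best fit
 *	if (!hole)
 *		return -ENOSPC;
 *	node = drm_mm_get_block_generic(hole, size, alignment, 0);
 *	if (!node)
 *		return -ENOMEM;
 *	...
 *	drm_mm_put_block(node);		// frees the range and the node
 */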
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(drm_mm_hole_node_start(entry),
				     drm_mm_hole_node_end(entry),
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
			mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
			mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		/* Store the size of the complete hole, as the containment
		 * check in drm_mm_scan_remove_block() expects. */
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end - hole_start;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact reverse order from the scan list as
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start and size of the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size)
		return 1;

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
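
/*
 * Example (illustrative sketch, not part of the original file): the full
 * eviction-scan protocol. Candidates are prepended to a local list so that
 * walking it forward removes blocks in exactly the reverse order they were
 * added, as required above. The obj/lru/evict_link names are hypothetical.
 *
 *	drm_mm_init_scan(mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &candidates);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			goto found;
 *	}
 *	// no hole found: remove all candidates again and report failure
 * found:
 *	list_for_each_entry_safe(obj, next, &candidates, evict_link) {
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			list_move(&obj->evict_link, &evict_list);
 *		else
 *			list_del(&obj->evict_link);
 *	}
 *	// evict everything on evict_list, then redo the allocation
 */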
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking:
	 * head_node is a sentinel placed at start + size whose size wraps
	 * around (start - (start + size)), so drm_mm_hole_node_start() on it
	 * yields start and the hole following it covers the whole range. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
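
/*
 * Example (illustrative sketch, not part of the original file): setting up
 * a manager over a 16 MiB range starting at offset 0.
 *
 *	struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, 16 * 1024 * 1024);
 *	...
 *	// every node must be removed/put back before takedown
 *	drm_mm_takedown(&mm);
 */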
void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
		   total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif