drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
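
/*
 * Example usage (editor's sketch, not part of the original file; error
 * handling trimmed and sizes arbitrary). A manager is initialized over a
 * range of offsets, blocks are carved out with a search followed by a get,
 * and returned with a put:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 4096);			// manage offsets [0, 4096)
 *	node = drm_mm_search_free(&mm, 64, 16, 0);	// first fit, 16-aligned
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 64, 16, 0);
 *	...						// use [node->start, node->start + node->size)
 *	drm_mm_put_block(node);				// coalesce back into the free list
 *	drm_mm_takedown(&mm);
 */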
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	/* Fall back to the pre-allocated node cache if kmalloc failed. */
	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/*
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		/* Drop the lock around the sleeping allocation. */
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
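
/*
 * Example (editor's sketch, assuming a hypothetical driver lock): callers
 * that must carve out space while holding a spinlock can prefill the node
 * cache with drm_mm_pre_get() first, then pass atomic = 1 so the helpers
 * never sleep:
 *
 *	drm_mm_pre_get(mm);				// may sleep
 *	spin_lock(&dev_priv->mm_lock);			// hypothetical
 *	node = drm_mm_search_free(mm, size, align, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, size, align, 1);
 *	spin_unlock(&dev_priv->mm_lock);
 */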
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}
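
/*
 * Editor's note: the split carves the allocation off the front of a free
 * block. If parent covers [s, s + n) and "size" bytes are split off, the
 * returned child covers [s, s + size) and parent shrinks in place to
 * [s + size, s + n), staying on the free stack.
 */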
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		/* Split off the head so the returned block starts aligned. */
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		/* Exact fit: take the whole block off the free stack. */
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	/* Return the alignment remainder to the free list. */
	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = (node->start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* Both neighbours free: fold next_node into
				 * prev_node and recycle its node structure. */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		/* cur was absorbed by a neighbour; recycle or free it. */
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
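
/*
 * Worked example (editor's note): with three adjacent blocks
 * [0,a) free, [a,b) used, [b,c) free, putting back [a,b) merges twice:
 * prev absorbs cur, then absorbs next, leaving a single free node [0,c).
 * The two now-redundant drm_mm_node structures are recycled into
 * unused_nodes (up to MM_UNUSED_TARGET) or kfree()d.
 */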
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
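
/*
 * Example (editor's note): with free blocks of 128 and 64 bytes sitting on
 * the free stack in that order, drm_mm_search_free(mm, 64, 0, 0) returns
 * the first block that fits (the 128-byte one), while best_match = 1 scans
 * the whole stack and returns the tightest fit (the 64-byte one), trading
 * a full list walk for less fragmentation.
 */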
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		/* Skip blocks that lie entirely outside [start, end]. */
		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted &&
		    (entry->start + wasted + size) <= end) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	/* Clean means the only node left is the initial free block. */
	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	/* Start with a single free block covering the whole range. */
	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	/* Drain the cache of pre-allocated nodes. */
	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif