/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>

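/*
 * Typical usage, as a minimal sketch only: the managed range, the 4 KiB
 * request and the error handling below are made up for illustration and are
 * not part of this file. Callers that allocate from atomic context should
 * call drm_mm_pre_get() beforehand so node allocations can be satisfied from
 * the unused-node cache.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);		// manage [0, 1 MiB)
 *
 *	hole = drm_mm_search_free(&mm, 4096, 0, 0);	// first fit, no alignment
 *	if (hole != NULL) {
 *		node = drm_mm_get_block_generic(hole, 4096, 0, 0);
 *		// ... use node->start and node->size ...
 *		drm_mm_put_block(node);			// free; neighbours merge
 *	}
 *
 *	drm_mm_takedown(&mm);
 */
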
#define MM_UNUSED_TARGET 4

unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return 0;

        return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free)
                return -ENOMEM;

        if (entry->size <= size)
                return -ENOMEM;

        entry->size -= size;
        return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kmalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kmalloc(sizeof(*child), GFP_KERNEL);

        /* Fall back to the cache of pre-allocated nodes if kmalloc fails. */
        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, fl_entry);
                        list_del(&child->fl_entry);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/*
 * Pre-fill the cache of unused nodes so that later allocations from atomic
 * context can fall back to it when GFP_ATOMIC kmalloc fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                /* Drop the lock around the sleeping allocation. */
                spin_unlock(&mm->unused_lock);
                node = kmalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->fl_entry, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

static int drm_mm_create_tail_node(struct drm_mm *mm,
                                   unsigned long start,
                                   unsigned long size, int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(mm, atomic);
        if (unlikely(child == NULL))
                return -ENOMEM;

        child->free = 1;
        child->size = size;
        child->start = start;
        child->mm = mm;

        list_add_tail(&child->ml_entry, &mm->ml_entry);
        list_add_tail(&child->fl_entry, &mm->fl_entry);

        return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
        struct list_head *tail_node;
        struct drm_mm_node *entry;

        tail_node = mm->ml_entry.prev;
        entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
        if (!entry->free) {
                return drm_mm_create_tail_node(mm, entry->start + entry->size,
                                               size, atomic);
        }
        entry->size += size;
        return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
                                                 unsigned long size,
                                                 int atomic)
{
        struct drm_mm_node *child;

        child = drm_mm_kmalloc(parent->mm, atomic);
        if (unlikely(child == NULL))
                return NULL;

        INIT_LIST_HEAD(&child->fl_entry);

        child->free = 0;
        child->size = size;
        child->start = parent->start;
        child->mm = parent->mm;

        list_add_tail(&child->ml_entry, &parent->ml_entry);
        INIT_LIST_HEAD(&child->fl_entry);

        /* The remainder of the parent hole follows the new allocation. */
        parent->size -= size;
        parent->start += size;
        return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *align_splitoff = NULL;
        unsigned tmp = 0;

        if (alignment)
                tmp = node->start % alignment;

        /* Split off the part needed to reach the requested alignment. */
        if (tmp) {
                align_splitoff =
                    drm_mm_split_at_start(node, alignment - tmp, atomic);
                if (unlikely(align_splitoff == NULL))
                        return NULL;
        }

        if (node->size == size) {
                list_del_init(&node->fl_entry);
                node->free = 0;
        } else {
                node = drm_mm_split_at_start(node, size, atomic);
        }

        if (align_splitoff)
                drm_mm_put_block(align_splitoff);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
        struct drm_mm *mm = cur->mm;
        struct list_head *cur_head = &cur->ml_entry;
        struct list_head *root_head = &mm->ml_entry;
        struct drm_mm_node *prev_node = NULL;
        struct drm_mm_node *next_node;

        int merged = 0;

        /* Merge into the previous node if it is free. */
        if (cur_head->prev != root_head) {
                prev_node =
                    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
                if (prev_node->free) {
                        prev_node->size += cur->size;
                        merged = 1;
                }
        }

        /* Merge with the next node if it is free as well. */
        if (cur_head->next != root_head) {
                next_node =
                    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
                if (next_node->free) {
                        if (merged) {
                                prev_node->size += next_node->size;
                                list_del(&next_node->ml_entry);
                                list_del(&next_node->fl_entry);
                                spin_lock(&mm->unused_lock);
                                if (mm->num_unused < MM_UNUSED_TARGET) {
                                        list_add(&next_node->fl_entry,
                                                 &mm->unused_nodes);
                                        ++mm->num_unused;
                                } else
                                        kfree(next_node);
                                spin_unlock(&mm->unused_lock);
                        } else {
                                next_node->size += cur->size;
                                next_node->start = cur->start;
                                merged = 1;
                        }
                }
        }

        if (!merged) {
                cur->free = 1;
                list_add(&cur->fl_entry, &mm->fl_entry);
        } else {
                /* The node was merged away; recycle or free it. */
                list_del(&cur->ml_entry);
                spin_lock(&mm->unused_lock);
                if (mm->num_unused < MM_UNUSED_TARGET) {
                        list_add(&cur->fl_entry, &mm->unused_nodes);
                        ++mm->num_unused;
                } else
                        kfree(cur);
                spin_unlock(&mm->unused_lock);
        }
}
EXPORT_SYMBOL(drm_mm_put_block);

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct list_head *list;
        const struct list_head *free_stack = &mm->fl_entry;
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;
        unsigned wasted;

        best = NULL;
        best_size = ~0UL;

        list_for_each(list, free_stack) {
                entry = list_entry(list, struct drm_mm_node, fl_entry);
                wasted = 0;

                if (entry->size < size)
                        continue;

                if (alignment) {
                        register unsigned tmp = entry->start % alignment;
                        if (tmp)
                                wasted += alignment - tmp;
                }

                if (entry->size >= size + wasted) {
                        if (!best_match)
                                return entry;
                        if (entry->size < best_size) {
                                best = entry;
                                best_size = entry->size;
                        }
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

/* Returns nonzero if only the initial tail node is left in the manager. */
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->ml_entry;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->ml_entry);
        INIT_LIST_HEAD(&mm->fl_entry);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        spin_lock_init(&mm->unused_lock);

        /* Start out with one big free node covering the whole range. */
        return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
        struct list_head *bnode = mm->fl_entry.next;
        struct drm_mm_node *entry;
        struct drm_mm_node *next;

        entry = list_entry(bnode, struct drm_mm_node, fl_entry);

        if (entry->ml_entry.next != &mm->ml_entry ||
            entry->fl_entry.next != &mm->fl_entry) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        list_del(&entry->fl_entry);
        list_del(&entry->ml_entry);
        kfree(entry);

        /* Free any nodes still sitting on the unused-node cache. */
        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
                list_del(&entry->fl_entry);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);