/*
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright (c) 2009, Intel Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "drmP.h"
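/*
 * Illustrative usage sketch (an assumption for clarity, not code from this
 * file): a typical lifecycle of a drm_mm arena. drm_mm_search_free() only
 * locates a candidate free node; drm_mm_get_block() actually carves the
 * allocation out of it, and drm_mm_put_block() returns it.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	(void) drm_mm_init(&mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node != NULL)
 *		node = drm_mm_get_block(node, 4096, 0);
 *	... use node->start as the allocated offset ...
 *	if (node != NULL)
 *		drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */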
unsigned long
drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return (0);

	return (entry->size);
}
int
drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return (ENOMEM);

	if (entry->size <= size)
		return (ENOMEM);

	entry->size -= size;
	return (0);
}
static int
drm_mm_create_tail_node(struct drm_mm *mm,
    unsigned long start, unsigned long size)
{
	struct drm_mm_node *child;

	child = (struct drm_mm_node *)
	    drm_alloc(sizeof (*child), DRM_MEM_MM);
	if (child == NULL)
		return (ENOMEM);

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry, (caddr_t)child);
	list_add_tail(&child->fl_entry, &mm->fl_entry, (caddr_t)child);

	return (0);
}
int
drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		/* Tail block is in use: append a fresh free node after it. */
		return (drm_mm_create_tail_node(mm,
		    entry->start + entry->size, size));
	}
	/* Tail block is free: simply grow it. */
	entry->size += size;
	return (0);
}
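/*
 * Illustrative sketch (assumed numbers, not from this file): growing and
 * shrinking the managed range at its tail. With an arena [0, 1MB) whose tail
 * node is free, drm_mm_add_space_to_tail(mm, 4096) extends that node by 4096
 * bytes; drm_mm_remove_space_from_tail(mm, 4096) gives the space back,
 * provided the tail node is free and strictly larger than 4096 bytes.
 *
 *	if (drm_mm_add_space_to_tail(mm, 4096) != 0)
 *		return (ENOMEM);
 *	...
 *	(void) drm_mm_remove_space_from_tail(mm, 4096);
 */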
static struct drm_mm_node *
drm_mm_split_at_start(struct drm_mm_node *parent,
    unsigned long size)
{
	struct drm_mm_node *child;

	child = (struct drm_mm_node *)
	    drm_alloc(sizeof (*child), DRM_MEM_MM);
	if (child == NULL)
		return (NULL);

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry, (caddr_t)child);
	INIT_LIST_HEAD(&child->fl_entry);

	/* The parent keeps whatever follows the carved-off front piece. */
	parent->size -= size;
	parent->start += size;
	return (child);
}
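/*
 * Worked example (assumed numbers): splitting a free node covering [0, 100)
 * with drm_mm_split_at_start(parent, 30) returns a new node for [0, 30) and
 * shrinks the parent in place to start 30, size 70 (the range [30, 100)).
 * The child is linked into the memory list but not the free list.
 */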
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void
drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;
	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node = list_entry(cur_head->prev,
		    struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			/* Fold cur into the free block before it. */
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node = list_entry(cur_head->next,
		    struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* prev, cur and next all coalesce. */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				drm_free(next_node,
				    sizeof (*next_node), DRM_MEM_MM);
			} else {
				/* Fold cur into the free block after it. */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry, (caddr_t)cur);
	} else {
		list_del(&cur->ml_entry);
		drm_free(cur, sizeof (*cur), DRM_MEM_MM);
	}
}
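/*
 * Worked example (assumed layout): with nodes A at [0, 32) free, B at
 * [32, 64) allocated and C at [64, 96) free, drm_mm_put_block(B) folds B
 * and C into A, leaving one free node covering [0, 96); B's and C's
 * bookkeeping structures are freed. If neither neighbor were free, B would
 * simply have its free flag set and be pushed onto the free stack.
 */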
struct drm_mm_node *
drm_mm_get_block(struct drm_mm_node *parent,
    unsigned long size, unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		/* Carve off the misaligned front so the block starts aligned. */
		align_splitoff =
		    drm_mm_split_at_start(parent, alignment - tmp);
		if (align_splitoff == NULL)
			return (NULL);
	}

	if (parent->size == size) {
		/* Exact fit: take the whole free node off the free stack. */
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		return (parent);
	} else {
		child = drm_mm_split_at_start(parent, size);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return (child);
}
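/*
 * Worked example (assumed numbers): with a free node starting at offset 5
 * and alignment 4, tmp = 5 % 4 = 1, so a 3-byte front piece [5, 8) is split
 * off and immediately returned to the free stack, leaving the allocation to
 * begin at the aligned offset 8.
 */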
struct drm_mm_node *
drm_mm_search_free(const struct drm_mm *mm,
    unsigned long size, unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return (entry);
			/* Track the smallest node that still fits. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return (best);
}
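/*
 * Note: with best_match == 0 this is first fit over the unordered free
 * stack; with best_match != 0 it scans the whole stack and returns the
 * smallest node that still fits, trading a full O(n) walk for less
 * fragmentation. Illustrative call (assumed sizes):
 *
 *	entry = drm_mm_search_free(mm, 8192, 4096, 1);
 *	if (entry != NULL)
 *		node = drm_mm_get_block(entry, 8192, 4096);
 */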
int
drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	/* Clean means only the initial tail node remains in the list. */
	return (head->next->next == head);
}
int
drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);

	return (drm_mm_create_tail_node(mm, start, size));
}
void
drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	drm_free(entry, sizeof (*entry), DRM_MEM_MM);
}
void
drm_mm_clean_ml(const struct drm_mm *mm)
{
	const struct list_head *mlstack = &mm->ml_entry;
	struct list_head *list, *temp;
	struct drm_mm_node *entry;

	if (mlstack->next == NULL)
		return;

	list_for_each_safe(list, temp, mlstack) {
		entry = list_entry(list, struct drm_mm_node, ml_entry);
		DRM_DEBUG("ml_entry 0x%p, size 0x%lx, start 0x%lx",
		    (void *)entry, entry->size, entry->start);

		list_del(&entry->fl_entry);
		list_del(&entry->ml_entry);
		drm_free(entry, sizeof (*entry), DRM_MEM_MM);
	}
}