/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/mm.h>
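
/*
 * node(root, dir) steps to the previous or next entry in the
 * address-ordered node list, or evaluates to NULL at either end of the
 * list ("dir" is the nl_entry member to follow, prev or next).
 */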
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
        list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
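
/*
 * Dump the complete node list and the free list to the kernel log,
 * prefixed with the caller-supplied header.
 */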
void
nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
{
        struct nvkm_mm_node *node;

        pr_err("nvkm: %s\n", header);
        pr_err("nvkm: node list:\n");
        list_for_each_entry(node, &mm->nodes, nl_entry) {
                pr_err("nvkm: \t%08x %08x %d\n",
                       node->offset, node->length, node->type);
        }
        pr_err("nvkm: free list:\n");
        list_for_each_entry(node, &mm->free, fl_entry) {
                pr_err("nvkm: \t%08x %08x %d\n",
                       node->offset, node->length, node->type);
        }
}
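
/*
 * Free an allocated node.  The node is merged into an adjacent free
 * neighbour where possible; otherwise it is marked NVKM_MM_TYPE_NONE
 * and re-inserted into the free list, which is kept sorted by offset.
 * *pthis is cleared on return.
 */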
void
nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
{
        struct nvkm_mm_node *this = *pthis;

        if (this) {
                struct nvkm_mm_node *prev = node(this, prev);
                struct nvkm_mm_node *next = node(this, next);

                if (prev && prev->type == NVKM_MM_TYPE_NONE) {
                        prev->length += this->length;
                        list_del(&this->nl_entry);
                        kfree(this); this = prev;
                }

                if (next && next->type == NVKM_MM_TYPE_NONE) {
                        next->offset  = this->offset;
                        next->length += this->length;
                        if (this->type == NVKM_MM_TYPE_NONE)
                                list_del(&this->fl_entry);
                        list_del(&this->nl_entry);
                        kfree(this); this = NULL;
                }

                if (this && this->type != NVKM_MM_TYPE_NONE) {
                        list_for_each_entry(prev, &mm->free, fl_entry) {
                                if (this->offset < prev->offset)
                                        break;
                        }

                        list_add_tail(&this->fl_entry, &prev->fl_entry);
                        this->type = NVKM_MM_TYPE_NONE;
                }
        }

        *pthis = NULL;
}
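
/*
 * Split "size" units off the front of free node "a".  Returns "a" itself
 * if it is already exactly "size" units long, the newly inserted front
 * node otherwise, or NULL if allocating the new node fails.
 */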
static struct nvkm_mm_node *
region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
        struct nvkm_mm_node *b;

        if (a->length == size)
                return a;

        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (unlikely(b == NULL))
                return NULL;

        b->offset = a->offset;
        b->length = size;
        b->heap   = a->heap;
        b->type   = a->type;
        a->offset += size;
        a->length -= size;
        list_add_tail(&b->nl_entry, &a->nl_entry);
        if (b->type == NVKM_MM_TYPE_NONE)
                list_add_tail(&b->fl_entry, &a->fl_entry);

        return b;
}
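
/*
 * Allocate between size_min and size_max units from the lowest suitable
 * free node, restricted to "heap" unless it is NVKM_MM_HEAP_ANY, and
 * aligned to "align" units.  Where a candidate borders nodes of a
 * different type, its usable range is trimmed to the block size.
 * Returns 0 with the result in *pnode, -ENOMEM if splitting a node
 * fails, or -ENOSPC if no free node satisfies the request.
 */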
int
nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
             u32 align, struct nvkm_mm_node **pnode)
{
        struct nvkm_mm_node *prev, *this, *next;
        u32 mask = align - 1;
        u32 splitoff;
        u32 s, e;

        BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

        list_for_each_entry(this, &mm->free, fl_entry) {
                if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
                        if (this->heap != heap)
                                continue;
                }
                e = this->offset + this->length;
                s = this->offset;

                prev = node(this, prev);
                if (prev && prev->type != type)
                        s = roundup(s, mm->block_size);

                next = node(this, next);
                if (next && next->type != type)
                        e = rounddown(e, mm->block_size);

                s  = (s + mask) & ~mask;
                e &= ~mask;
                if (s > e || e - s < size_min)
                        continue;

                splitoff = s - this->offset;
                if (splitoff && !region_head(mm, this, splitoff))
                        return -ENOMEM;

                this = region_head(mm, this, min(size_max, e - s));
                if (!this)
                        return -ENOMEM;

                this->next = NULL;
                this->type = type;
                list_del(&this->fl_entry);
                *pnode = this;
                return 0;
        }

        return -ENOSPC;
}
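
/*
 * Split "size" units off the back of free node "a"; the counterpart of
 * region_head() used for top-down allocation.
 */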
static struct nvkm_mm_node *
region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
        struct nvkm_mm_node *b;

        if (a->length == size)
                return a;

        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (unlikely(b == NULL))
                return NULL;

        a->length -= size;
        b->offset  = a->offset + a->length;
        b->length  = size;
        b->heap    = a->heap;
        b->type    = a->type;

        list_add(&b->nl_entry, &a->nl_entry);
        if (b->type == NVKM_MM_TYPE_NONE)
                list_add(&b->fl_entry, &a->fl_entry);

        return b;
}
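
/*
 * As nvkm_mm_head(), but the free list is scanned in reverse and the
 * allocation is carved from the high end of the chosen node.
 */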
int
nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
             u32 align, struct nvkm_mm_node **pnode)
{
        struct nvkm_mm_node *prev, *this, *next;
        u32 mask = align - 1;

        BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

        list_for_each_entry_reverse(this, &mm->free, fl_entry) {
                u32 e = this->offset + this->length;
                u32 s = this->offset;
                u32 c = 0, a;
                if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
                        if (this->heap != heap)
                                continue;
                }

                prev = node(this, prev);
                if (prev && prev->type != type)
                        s = roundup(s, mm->block_size);

                next = node(this, next);
                if (next && next->type != type) {
                        e = rounddown(e, mm->block_size);
                        c = next->offset - e;
                }

                s = (s + mask) & ~mask;
                a = e - s;
                if (s > e || a < size_min)
                        continue;

                a  = min(a, size_max);
                s  = (e - a) & ~mask;
                c += (e - s) - a;

                if (c && !region_tail(mm, this, c))
                        return -ENOMEM;

                this = region_tail(mm, this, a);
                if (!this)
                        return -ENOMEM;

                this->next = NULL;
                this->type = type;
                list_del(&this->fl_entry);
                *pnode = this;
                return 0;
        }

        return -ENOSPC;
}
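
/*
 * Initialise the allocator, or add another heap to one that is already
 * initialised.  A hole node is inserted if the new range does not start
 * where the previous one ended, and the range is rounded inwards to the
 * block size before being placed on the node and free lists.
 */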
int
nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
{
        struct nvkm_mm_node *node, *prev;
        u32 next;

        if (nvkm_mm_initialised(mm)) {
                prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
                next = prev->offset + prev->length;
                if (next != offset) {
                        BUG_ON(next > offset);
                        if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
                                return -ENOMEM;
                        node->type   = NVKM_MM_TYPE_HOLE;
                        node->offset = next;
                        node->length = offset - next;
                        list_add_tail(&node->nl_entry, &mm->nodes);
                }
                BUG_ON(block != mm->block_size);
        } else {
                INIT_LIST_HEAD(&mm->nodes);
                INIT_LIST_HEAD(&mm->free);
                mm->block_size = block;
                mm->heap_nodes = 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        if (length) {
                node->offset  = roundup(offset, mm->block_size);
                node->length  = rounddown(offset + length, mm->block_size);
                node->length -= node->offset;
        }

        list_add_tail(&node->nl_entry, &mm->nodes);
        list_add_tail(&node->fl_entry, &mm->free);
        node->heap = heap;
        mm->heap_nodes++;
        return 0;
}
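
/*
 * Tear the allocator down, freeing all nodes.  Fails with -EBUSY (after
 * dumping the lists) if more non-hole nodes remain than heaps were
 * registered, i.e. if allocations are still outstanding.
 */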
int
nvkm_mm_fini(struct nvkm_mm *mm)
{
        struct nvkm_mm_node *node, *temp;
        int nodes = 0;

        if (!nvkm_mm_initialised(mm))
                return 0;

        list_for_each_entry(node, &mm->nodes, nl_entry) {
                if (node->type != NVKM_MM_TYPE_HOLE) {
                        if (++nodes > mm->heap_nodes) {
                                nvkm_mm_dump(mm, "mm not clean!");
                                return -EBUSY;
                        }
                }
        }

        list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
                list_del(&node->nl_entry);
                kfree(node);
        }

        mm->heap_nodes = 0;
        return 0;
}