/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
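
/*
 * GPU virtual memory helpers.  Addresses are managed in 4KiB (1 << 12)
 * units: vma->node->offset/length are in pages, vma->node->type is the
 * page shift of the mapping, and each page directory entry (PDE) covers
 * (1 << (pgt_bits + 12)) bytes of address space.  The map/unmap helpers
 * below split a request into per-page-table runs: pde/pte index the page
 * table and the entry within it, and max is the number of entries one
 * table holds at this page size.
 *
 * A rough sketch of the expected calling sequence (illustrative only;
 * "chan" and "chan_pgd" stand in for whatever object owns the page
 * directory in the caller):
 *
 *	struct nouveau_vm *vm = NULL;
 *	struct nouveau_vma vma = {};
 *
 *	nouveau_vm_new(dev, 0, 1ULL << 40, 0x1000, &vm);
 *	nouveau_vm_ref(vm, &chan->vm, chan_pgd);	 link a page directory
 *	nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	nouveau_vm_map(&vma, mem);
 *	...
 *	nouveau_vm_unmap(&vma);
 *	nouveau_vm_put(&vma);
 *	nouveau_vm_ref(NULL, &chan->vm, chan_pgd);	 drop the reference
 */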
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			/* clamp this run to the end of the current page table */
			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}
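
/* Map all of node's memory regions starting at the base of the vma. */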
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}
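
/*
 * Map system pages: mem->pages supplies one DMA address per 4KiB page,
 * and the backend's map_sg() writes them into the page table(s).
 */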
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
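
/* Clear the PTEs covering [delta, delta + length) of the vma. */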
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}
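
/* Unmap the entire vma. */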
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}
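
/*
 * Drop one reference on each page table between fpde and lpde; a table
 * that reaches zero is unbound from every attached page directory and
 * freed.  The mm mutex is dropped around the final nouveau_gpuobj_ref(),
 * mirroring the allocation path in nouveau_vm_map_pgt() below.
 */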
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
	}
}
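
/*
 * Allocate the page table backing a single PDE and point every attached
 * page directory at it.  Called with the mm mutex held; the mutex is
 * dropped around the gpuobj allocation, so the refcount must be
 * rechecked afterwards.
 */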
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm.mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm.mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}
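
/*
 * Allocate address space for a mapping of 'size' bytes at the given page
 * size, taking a reference on (and allocating, if necessary) every page
 * table the range touches.
 */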
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm.mutex);
	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm.mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			/* back out the references taken so far */
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(&vm->mm, vma->node);
			mutex_unlock(&vm->mm.mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}
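
/* Release the address space and page-table references taken by nouveau_vm_get(). */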
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm.mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(&vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm.mutex);
}
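
/*
 * Create an address space spanning [offset, offset + length), with
 * allocations managed from mm_offset upward.  Only the NV50 and
 * NV_C0-and-newer backends are wired up here.
 */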
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;

	} else
	if (dev_priv->card_type >= NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde = offset >> pgt_bits;
	vm->lpde = (offset + length - 1) >> pgt_bits;
	vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm->pgt);	/* don't leak the page-table array on failure */
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}
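
/*
 * Attach a page directory to the address space, filling its PDEs from
 * the page tables allocated so far.
 */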
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm.mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm.mutex);
	return 0;
}
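
/* Detach a page directory and drop the reference nouveau_vm_link() took on it. */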
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&vm->mm.mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}
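
/* Final teardown: detach any remaining page directories and free the vm. */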
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}
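
/*
 * Update *ptr to reference 'ref' (either may be NULL), adjusting VM
 * refcounts and (un)linking 'pgd' as a page directory along the way.
 */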
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}