/*
 * Copyright 2010 Christoph Bumiller.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "pscnv_mem.h"
#include "pscnv_vm.h"
#include "pscnv_chan.h"
#include "nvc0_vm.h"

#define PSCNV_GEM_NOUSER 0x10 /* XXX */

static int nvc0_vm_map_kernel(struct pscnv_bo *bo);
static void nvc0_vm_takedown(struct drm_device *dev);
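
/*
 * Flush the VM TLBs for one virtual address space.  The page directory
 * address (>> 8) goes into 0x100cb8 and the write to 0x100cbc kicks off the
 * flush; 0x100c80 is then polled until it returns to its previous value.
 * The BAR3/RAMIN vspace (vid -3) appears to need trigger value 0x5, all
 * other vspaces use 0x1.
 */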
static int
nvc0_tlb_flush(struct pscnv_vspace *vs)
{
        struct drm_device *dev = vs->dev;
        uint32_t val;

        BUG_ON(!nvc0_vs(vs)->pd);

        NV_DEBUG(dev, "nvc0_tlb_flush 0x%010llx\n", nvc0_vs(vs)->pd->start);

        val = nv_rd32(dev, 0x100c80);

        nv_wr32(dev, 0x100cb8, nvc0_vs(vs)->pd->start >> 8);
        nv_wr32(dev, 0x100cbc, 0x80000000 | ((vs->vid == -3) ? 0x5 : 0x1));

        if (!nv_wait(dev, 0x100c80, ~0, val)) {
                NV_ERROR(vs->dev, "tlb flush timed out\n");
                return -EBUSY;
        }

        return 0;
}
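
/*
 * Allocate the page table(s) backing a single PDE and hook them up in the
 * page directory.  bo[1] holds the small-page PTEs; for everything except
 * the BAR3/RAMIN vspace a large-page table (bo[0]) is allocated as well and
 * both tables are mapped into the kernel's BAR3 vspace.  The two 32-bit PDE
 * words are then written and the TLBs flushed.
 */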
static int
nvc0_vspace_fill_pde(struct pscnv_vspace *vs, struct nvc0_pgt *pgt)
{
        struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
        const uint32_t size = NVC0_VM_SPTE_COUNT << (3 - pgt->limit);
        int i;
        uint32_t pde[2];

        pgt->bo[1] = pscnv_mem_alloc(vs->dev, size, PSCNV_GEM_CONTIG, 0, 0x59);
        if (!pgt->bo[1])
                return -ENOMEM;

        for (i = 0; i < size; i += 4)
                nv_wv32(pgt->bo[1], i, 0);

        pde[0] = pgt->limit << 2;
        pde[1] = (pgt->bo[1]->start >> 8) | 1;

        if (vs->vid != -3) {
                pgt->bo[0] = pscnv_mem_alloc(vs->dev, NVC0_VM_LPTE_COUNT * 8,
                                             PSCNV_GEM_CONTIG, 0, 0x79);
                if (!pgt->bo[0])
                        return -ENOMEM;

                nvc0_vm_map_kernel(pgt->bo[0]);
                nvc0_vm_map_kernel(pgt->bo[1]);

                for (i = 0; i < NVC0_VM_LPTE_COUNT * 8; i += 4)
                        nv_wv32(pgt->bo[0], i, 0);

                pde[0] |= (pgt->bo[0]->start >> 8) | 1;
        }
        dev_priv->vm->bar_flush(vs->dev);

        nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 0, pde[0]);
        nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 4, pde[1]);

        dev_priv->vm->bar_flush(vs->dev);
        return nvc0_tlb_flush(vs);
}
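
/*
 * Look up the page table for a PDE in the per-vspace hash table, creating
 * and filling it on first use.  Returns NULL if allocation fails.
 */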
static struct nvc0_pgt *
nvc0_vspace_pgt(struct pscnv_vspace *vs, unsigned int pde)
{
        struct nvc0_pgt *pt;
        struct list_head *pts = &nvc0_vs(vs)->ptht[NVC0_PDE_HASH(pde)];

        BUG_ON(pde >= NVC0_VM_PDE_COUNT);

        list_for_each_entry(pt, pts, head)
                if (pt->pde == pde)
                        return pt;

        NV_DEBUG(vs->dev, "creating new page table: %i[%u]\n", vs->vid, pde);

        pt = kzalloc(sizeof *pt, GFP_KERNEL);
        if (!pt)
                return NULL;
        pt->pde = pde;
        pt->limit = 0;

        if (nvc0_vspace_fill_pde(vs, pt)) {
                kfree(pt);
                return NULL;
        }

        list_add_tail(&pt->head, pts);
        return pt;
}
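
/* Free the page table(s) of a PDE and clear its entry in the page directory. */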
static void
nvc0_pgt_del(struct pscnv_vspace *vs, struct nvc0_pgt *pgt)
{
        pscnv_vram_free(pgt->bo[1]);
        if (pgt->bo[0])
                pscnv_vram_free(pgt->bo[0]);
        list_del(&pgt->head);

        nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 0, 0);
        nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 4, 0);

        kfree(pgt);
}
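
/*
 * Clear the PTEs covering [offset, offset + size), walking the range one
 * VM block (one PDE's worth of address space) at a time.  Small-page
 * entries are always cleared; the large-page table is only touched where
 * one exists for the block.
 */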
static int
nvc0_vspace_do_unmap(struct pscnv_vspace *vs, uint64_t offset, uint64_t size)
{
        struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
        uint32_t space;

        for (; size; offset += space) {
                struct nvc0_pgt *pt;
                int i, pte;

                pt = nvc0_vspace_pgt(vs, NVC0_PDE(offset));
                space = NVC0_VM_BLOCK_SIZE - (offset & NVC0_VM_BLOCK_MASK);
                if (space > size)
                        space = size;
                size -= space;

                pte = NVC0_SPTE(offset);
                for (i = 0; i < (space >> NVC0_SPAGE_SHIFT) * 8; i += 4)
                        nv_wv32(pt->bo[1], pte * 8 + i, 0);

                if (!pt->bo[0])
                        continue;

                pte = NVC0_LPTE(offset);
                for (i = 0; i < (space >> NVC0_LPAGE_SHIFT) * 8; i += 4)
                        nv_wv32(pt->bo[0], pte * 8 + i, 0);
        }
        dev_priv->vm->bar_flush(vs->dev);
        return nvc0_tlb_flush(vs);
}
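
/*
 * Write count consecutive 8-byte PTEs starting at index pte.  The low word
 * is the physical address >> 8 combined with pfl0, the high word is pfl1;
 * the address advances by one page (psz) per entry.  The high word is
 * written before the low word, presumably so the entry only becomes valid
 * once the word carrying the present bit lands.
 */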
static inline void
write_pt(struct pscnv_bo *pt, int pte, int count, uint64_t phys,
         int psz, uint32_t pfl0, uint32_t pfl1)
{
        int i;
        uint32_t a = (phys >> 8) | pfl0;
        uint32_t b = pfl1;

        psz >>= 8;

        for (i = pte * 8; i < (pte + count) * 8; i += 8, a += psz) {
                nv_wv32(pt, i + 4, b);
                nv_wv32(pt, i + 0, a);
        }
}
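
/*
 * Pick a virtual address range for a buffer object from the vspace
 * allocator.  Large-page VRAM objects request the allocator's large-page
 * placement (PSCNV_MM_LP); back selects allocation from the end of the
 * range (PSCNV_MM_FROMBACK).
 */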
static int
nvc0_vspace_place_map (struct pscnv_vspace *vs, struct pscnv_bo *bo,
                       uint64_t start, uint64_t end, int back,
                       struct pscnv_mm_node **res)
{
        int flags = 0;

        if ((bo->flags & PSCNV_GEM_MEMTYPE_MASK) == PSCNV_GEM_VRAM_LARGE)
                flags = PSCNV_MM_LP;
        if (back)
                flags |= PSCNV_MM_FROMBACK;

        return pscnv_mm_alloc(vs->mm, bo->size, flags, start, end, res);
}
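
/*
 * Write the PTEs for a buffer object at the given VM offset.  pfl0 carries
 * the low PTE word flags (bit 0 present, bit 1 apparently restricting user
 * access for NOUSER objects in user vspaces), pfl1 the high word (storage
 * type from tile_flags plus what look like memory target bits for system
 * RAM).  SYSRAM objects are mapped page by page from bo->dmapages using
 * small pages; VRAM objects walk their mm nodes and use large pages where
 * the object allows it, except in the BAR3 vspace which only uses small
 * pages.
 */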
static int
nvc0_vspace_do_map(struct pscnv_vspace *vs,
                   struct pscnv_bo *bo, uint64_t offset)
{
        struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
        uint32_t pfl0, pfl1;
        struct pscnv_mm_node *reg;
        int i;

        pfl0 = 1;
        if (vs->vid >= 0 && (bo->flags & PSCNV_GEM_NOUSER))
                pfl0 |= 2;

        pfl1 = bo->tile_flags << 4;

        switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
        case PSCNV_GEM_SYSRAM_NOSNOOP:
                pfl1 |= 0x2;
                /* fall through */
        case PSCNV_GEM_SYSRAM_SNOOP:
        {
                unsigned int pde = NVC0_PDE(offset);
                unsigned int pte = (offset & NVC0_VM_BLOCK_MASK) >> PAGE_SHIFT;
                struct nvc0_pgt *pt = nvc0_vspace_pgt(vs, pde);
                pfl1 |= 0x5;
                for (i = 0; i < (bo->size >> PAGE_SHIFT); ++i) {
                        uint64_t phys = bo->dmapages[i];
                        nv_wv32(pt->bo[1], pte * 8 + 4, pfl1);
                        nv_wv32(pt->bo[1], pte * 8 + 0, (phys >> 8) | pfl0);
                        pte++;
                        if ((pte & (NVC0_VM_BLOCK_MASK >> PAGE_SHIFT)) == 0) {
                                pte = 0;
                                pt = nvc0_vspace_pgt(vs, ++pde);
                        }
                }
        }
                break;
        case PSCNV_GEM_VRAM_SMALL:
        case PSCNV_GEM_VRAM_LARGE:
                for (reg = bo->mmnode; reg; reg = reg->next) {
                        uint32_t psh, psz;
                        uint64_t phys = reg->start, size = reg->size;

                        int s = (bo->flags & PSCNV_GEM_MEMTYPE_MASK) != PSCNV_GEM_VRAM_LARGE;
                        if (vs->vid == -3)
                                s = 1;
                        psh = s ? NVC0_SPAGE_SHIFT : NVC0_LPAGE_SHIFT;
                        psz = 1 << psh;

                        while (size) {
                                struct nvc0_pgt *pt;
                                int pte, count;
                                uint32_t space;

                                space = NVC0_VM_BLOCK_SIZE -
                                        (offset & NVC0_VM_BLOCK_MASK);
                                if (space > size)
                                        space = size;
                                size -= space;

                                pte = (offset & NVC0_VM_BLOCK_MASK) >> psh;
                                count = space >> psh;
                                pt = nvc0_vspace_pgt(vs, NVC0_PDE(offset));

                                write_pt(pt->bo[s], pte, count, phys, psz, pfl0, pfl1);

                                offset += space;
                                phys += space;
                        }
                }
                break;
        default:
                WARN(1, "Should not be here! Mask %08x\n", bo->flags & PSCNV_GEM_MEMTYPE_MASK);
                return -ENOSYS;
        }
        dev_priv->vm->bar_flush(vs->dev);
        return nvc0_tlb_flush(vs);
}
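
/*
 * Per-vspace setup: allocate the engine-private data and the page
 * directory, zero the directory, initialize the page table hash and the
 * virtual address allocator.  The address space is limited to 40 bits.
 */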
static int nvc0_vspace_new(struct pscnv_vspace *vs) {
        int i, ret;

        if (vs->size > 1ull << 40)
                return -EINVAL;

        vs->engdata = kzalloc(sizeof(struct nvc0_vspace), GFP_KERNEL);
        if (!vs->engdata) {
                NV_ERROR(vs->dev, "VM: Couldn't alloc vspace eng\n");
                return -ENOMEM;
        }

        nvc0_vs(vs)->pd = pscnv_mem_alloc(vs->dev, NVC0_VM_PDE_COUNT * 8,
                        PSCNV_GEM_CONTIG, 0, 0xdeadcafe);
        if (!nvc0_vs(vs)->pd) {
                kfree(vs->engdata);
                return -ENOMEM;
        }

        if (vs->vid != -3)
                nvc0_vm_map_kernel(nvc0_vs(vs)->pd);

        for (i = 0; i < NVC0_VM_PDE_COUNT; i++) {
                nv_wv32(nvc0_vs(vs)->pd, i * 8, 0);
                nv_wv32(nvc0_vs(vs)->pd, i * 8 + 4, 0);
        }

        for (i = 0; i < NVC0_PDE_HT_SIZE; ++i)
                INIT_LIST_HEAD(&nvc0_vs(vs)->ptht[i]);

        ret = pscnv_mm_init(vs->dev, 0, vs->size, 0x1000, 0x20000, 1, &vs->mm);
        if (ret) {
                pscnv_mem_free(nvc0_vs(vs)->pd);
                kfree(vs->engdata);
        }
        return ret;
}

static void nvc0_vspace_free(struct pscnv_vspace *vs) {
        int i;
        for (i = 0; i < NVC0_PDE_HT_SIZE; i++) {
                struct nvc0_pgt *pgt, *save;
                list_for_each_entry_safe(pgt, save, &nvc0_vs(vs)->ptht[i], head)
                        nvc0_pgt_del(vs, pgt);
        }
        pscnv_mem_free(nvc0_vs(vs)->pd);

        if (nvc0_vs(vs)->mmio_bo)
                pscnv_mem_free(nvc0_vs(vs)->mmio_bo);
        kfree(vs->engdata);
}
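
/*
 * Map a buffer object into the BAR1 (user) or BAR3 (kernel/RAMIN) vspace
 * so it can be reached through the corresponding PCI BAR.  Both are no-ops
 * if the object already has the respective mapping.
 */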
static int nvc0_vm_map_user(struct pscnv_bo *bo) {
        struct drm_nouveau_private *dev_priv = bo->dev->dev_private;
        struct nvc0_vm_engine *vme = nvc0_vm(dev_priv->vm);
        if (bo->map1)
                return 0;
        return pscnv_vspace_map(vme->bar1vm, bo, 0, dev_priv->fb_size, 0, &bo->map1);
}

static int nvc0_vm_map_kernel(struct pscnv_bo *bo) {
        struct drm_nouveau_private *dev_priv = bo->dev->dev_private;
        struct nvc0_vm_engine *vme = nvc0_vm(dev_priv->vm);
        if (bo->map3)
                return 0;
        return pscnv_vspace_map(vme->bar3vm, bo, 0, dev_priv->ramin_size, 0, &bo->map3);
}
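
/*
 * Global VM setup: register the engine hooks, then build the BAR3 (RAMIN)
 * vspace and channel first so that later allocations can be mapped into
 * the kernel, point 0x1714 at the BAR3 channel, and finally create the
 * BAR1 vspace/channel and point 0x1704 at it.  The writes to 0x200 (PMC
 * enable) appear to pulse-reset one engine before the VM is brought up.
 */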
int
nvc0_vm_init(struct drm_device *dev) {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvc0_pgt *pt;
        struct nvc0_vm_engine *vme = kzalloc(sizeof *vme, GFP_KERNEL);
        if (!vme) {
                NV_ERROR(dev, "VM: Couldn't alloc engine\n");
                return -ENOMEM;
        }
        vme->base.takedown = nvc0_vm_takedown;
        vme->base.do_vspace_new = nvc0_vspace_new;
        vme->base.do_vspace_free = nvc0_vspace_free;
        vme->base.place_map = nvc0_vspace_place_map;
        vme->base.do_map = nvc0_vspace_do_map;
        vme->base.do_unmap = nvc0_vspace_do_unmap;
        vme->base.map_user = nvc0_vm_map_user;
        vme->base.map_kernel = nvc0_vm_map_kernel;
        vme->base.bar_flush = nv84_vm_bar_flush;
        dev_priv->vm = &vme->base;

        dev_priv->vm_ramin_base = 0;
        spin_lock_init(&dev_priv->vm->vs_lock);

        nv_wr32(dev, 0x200, 0xfffffeff);
        nv_wr32(dev, 0x200, 0xffffffff);

        nv_wr32(dev, 0x100c80, 0x00208000);

        vme->bar3vm = pscnv_vspace_new (dev, dev_priv->ramin_size, 0, 3);
        if (!vme->bar3vm) {
                kfree(vme);
                dev_priv->vm = 0;
                return -ENOMEM;
        }
        vme->bar3ch = pscnv_chan_new (dev, vme->bar3vm, 3);
        if (!vme->bar3ch) {
                pscnv_vspace_unref(vme->bar3vm);
                kfree(vme);
                dev_priv->vm = 0;
                return -ENOMEM;
        }
        nv_wr32(dev, 0x1714, 0xc0000000 | vme->bar3ch->bo->start >> 12);

        dev_priv->vm_ok = 1;

        nvc0_vm_map_kernel(vme->bar3ch->bo);
        nvc0_vm_map_kernel(nvc0_vs(vme->bar3vm)->pd);
        pt = nvc0_vspace_pgt(vme->bar3vm, 0);
        if (!pt) {
                NV_ERROR(dev, "VM: failed to allocate RAMIN page table\n");
                return -ENOMEM;
        }
        nvc0_vm_map_kernel(pt->bo[1]);

        vme->bar1vm = pscnv_vspace_new (dev, dev_priv->fb_size, 0, 1);
        if (!vme->bar1vm) {
                dev_priv->vm_ok = 0;
                pscnv_chan_unref(vme->bar3ch);
                pscnv_vspace_unref(vme->bar3vm);
                kfree(vme);
                dev_priv->vm = 0;
                return -ENOMEM;
        }
        vme->bar1ch = pscnv_chan_new (dev, vme->bar1vm, 1);
        if (!vme->bar1ch) {
                dev_priv->vm_ok = 0;
                pscnv_vspace_unref(vme->bar1vm);
                pscnv_chan_unref(vme->bar3ch);
                pscnv_vspace_unref(vme->bar3vm);
                kfree(vme);
                dev_priv->vm = 0;
                return -ENOMEM;
        }
        nv_wr32(dev, 0x1704, 0x80000000 | vme->bar1ch->bo->start >> 12);
        return 0;
}
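
/*
 * Tear down the VM engine: clear the BAR binding registers
 * (0x1704/0x1714/0x1718) and release the channels and vspaces created in
 * nvc0_vm_init.
 */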
void
nvc0_vm_takedown(struct drm_device *dev) {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvc0_vm_engine *vme = nvc0_vm(dev_priv->vm);
        /* XXX: write me. */
        dev_priv->vm_ok = 0;
        nv_wr32(dev, 0x1704, 0);
        nv_wr32(dev, 0x1714, 0);
        nv_wr32(dev, 0x1718, 0);
        pscnv_chan_unref(vme->bar1ch);
        pscnv_vspace_unref(vme->bar1vm);
        pscnv_chan_unref(vme->bar3ch);
        pscnv_vspace_unref(vme->bar3vm);
        kfree(vme);
        dev_priv->vm = 0;
}