/*
 * Copyright 2010 Christoph Bumiller.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "pscnv_mem.h"
#include "pscnv_vm.h"
#include "pscnv_chan.h"
#include "nvc0_vm.h"

#define PSCNV_GEM_NOUSER 0x10 /* XXX */

static int nvc0_vm_map_kernel(struct pscnv_bo *bo);
static void nvc0_vm_takedown(struct drm_device *dev);

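/*
 * Flush the VM TLBs for a virtual address space.  From the register
 * accesses below: the page directory address (>> 8) goes into 0x100cb8,
 * the write to 0x100cbc triggers the flush, and 0x100c80 is polled to
 * wait for completion.  The BAR3 (RAMIN) vspace, vid -3, appears to need
 * flush type 0x5 instead of 0x1.
 */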
static int
nvc0_tlb_flush(struct pscnv_vspace *vs)
{
	struct drm_device *dev = vs->dev;
	uint32_t val;

	BUG_ON(!nvc0_vs(vs)->pd);

	NV_DEBUG(dev, "nvc0_tlb_flush 0x%010llx\n", nvc0_vs(vs)->pd->start);

	val = nv_rd32(dev, 0x100c80);

	nv_wr32(dev, 0x100cb8, nvc0_vs(vs)->pd->start >> 8);
	nv_wr32(dev, 0x100cbc, 0x80000000 | ((vs->vid == -3) ? 0x5 : 0x1));

	if (!nv_wait(dev, 0x100c80, ~0, val)) {
		NV_ERROR(vs->dev, "tlb flush timed out\n");
		return -EBUSY;
	}
	return 0;
}

static int
nvc0_vspace_fill_pde(struct pscnv_vspace *vs, struct nvc0_pgt *pgt)
{
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	const uint32_t size = NVC0_VM_SPTE_COUNT << (3 - pgt->limit);
	int i;
	uint32_t pde[2];

	pgt->bo[1] = pscnv_mem_alloc(vs->dev, size, PSCNV_GEM_CONTIG, 0, 0x59);
	if (!pgt->bo[1])
		return -ENOMEM;

	for (i = 0; i < size; i += 4)
		nv_wv32(pgt->bo[1], i, 0);

	pde[0] = pgt->limit << 2;
	pde[1] = (pgt->bo[1]->start >> 8) | 1;

	if (vs->vid != -3) {
		/* the BAR3 vspace (vid -3) seems to use small pages only */
		pgt->bo[0] = pscnv_mem_alloc(vs->dev, NVC0_VM_LPTE_COUNT * 8,
					     PSCNV_GEM_CONTIG, 0, 0x79);
		if (!pgt->bo[0])
			return -ENOMEM;

		nvc0_vm_map_kernel(pgt->bo[0]);
		nvc0_vm_map_kernel(pgt->bo[1]);

		for (i = 0; i < NVC0_VM_LPTE_COUNT * 8; i += 4)
			nv_wv32(pgt->bo[0], i, 0);

		pde[0] |= (pgt->bo[0]->start >> 8) | 1;
	}
	dev_priv->vm->bar_flush(vs->dev);

	nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 0, pde[0]);
	nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 4, pde[1]);

	dev_priv->vm->bar_flush(vs->dev);
	return nvc0_tlb_flush(vs);
}

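/*
 * Return the page table covering PDE index `pde`, creating and installing
 * it on first use.  Page tables are kept in the per-vspace hash table
 * ptht, keyed by NVC0_PDE_HASH(pde).
 */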
static struct nvc0_pgt *
nvc0_vspace_pgt(struct pscnv_vspace *vs, unsigned int pde)
{
	struct nvc0_pgt *pt;
	struct list_head *pts = &nvc0_vs(vs)->ptht[NVC0_PDE_HASH(pde)];

	BUG_ON(pde >= NVC0_VM_PDE_COUNT);

	list_for_each_entry(pt, pts, head)
		if (pt->pde == pde)
			return pt;

	NV_DEBUG(vs->dev, "creating new page table: %i[%u]\n", vs->vid, pde);

	pt = kzalloc(sizeof *pt, GFP_KERNEL);
	if (!pt)
		return NULL;
	pt->pde = pde;

	if (nvc0_vspace_fill_pde(vs, pt)) {
		kfree(pt);
		return NULL;
	}

	list_add_tail(&pt->head, pts);
	return pt;
}

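/*
 * Free a page table and clear its directory entry.  bo[0] may be NULL for
 * vspaces that never got a large-page table.
 */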
static void
nvc0_pgt_del(struct pscnv_vspace *vs, struct nvc0_pgt *pgt)
{
	pscnv_vram_free(pgt->bo[1]);
	if (pgt->bo[0])
		pscnv_vram_free(pgt->bo[0]);
	list_del(&pgt->head);

	nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 0, 0);
	nv_wv32(nvc0_vs(vs)->pd, pgt->pde * 8 + 4, 0);

	kfree(pgt);
}

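/*
 * Clear all PTEs covering [offset, offset + size).  The range is walked
 * one VM block (one PDE's worth of address space) at a time; both the
 * small-page table and, when present, the large-page table are cleared,
 * since it is not tracked here which of the two mapped the range.
 */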
static int
nvc0_vspace_do_unmap(struct pscnv_vspace *vs, uint64_t offset, uint64_t size)
{
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	uint32_t space;

	for (; size; offset += space) {
		struct nvc0_pgt *pt;
		int i, pte;

		pt = nvc0_vspace_pgt(vs, NVC0_PDE(offset));
		space = NVC0_VM_BLOCK_SIZE - (offset & NVC0_VM_BLOCK_MASK);
		if (space > size)
			space = size;
		size -= space;

		pte = NVC0_SPTE(offset);
		for (i = 0; i < (space >> NVC0_SPAGE_SHIFT) * 8; i += 4)
			nv_wv32(pt->bo[1], pte * 8 + i, 0);

		if (!pt->bo[0])
			continue;

		pte = NVC0_LPTE(offset);
		for (i = 0; i < (space >> NVC0_LPAGE_SHIFT) * 8; i += 4)
			nv_wv32(pt->bo[0], pte * 8 + i, 0);
	}
	dev_priv->vm->bar_flush(vs->dev);
	return nvc0_tlb_flush(vs);
}

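/*
 * Write `count` consecutive 8-byte PTEs.  Layout assumed from the values
 * written here and in nvc0_vspace_do_map:
 *   word 0: (phys >> 8) | pfl0   -- bit 0 = present
 *   word 1: pfl1                 -- target / storage type bits
 * The high word is written before the low word so the present bit lands
 * last; `a` advances by psz >> 8 because the address is stored shifted
 * right by 8.
 */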
static void
write_pt(struct pscnv_bo *pt, int pte, int count, uint64_t phys,
	 int psz, uint32_t pfl0, uint32_t pfl1)
{
	int i;
	uint32_t a = (phys >> 8) | pfl0;
	uint32_t b = pfl1;

	psz >>= 8;

	for (i = pte * 8; i < (pte + count) * 8; i += 8, a += psz) {
		nv_wv32(pt, i + 4, b);
		nv_wv32(pt, i + 0, a);
	}
}

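/*
 * Choose a placement for a BO in this vspace.  Large-page VRAM BOs ask
 * the allocator for large-page placement (flag name assumed below:
 * PSCNV_MM_LP); `back` requests allocation from the top of the range.
 */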
static int
nvc0_vspace_place_map(struct pscnv_vspace *vs, struct pscnv_bo *bo,
		      uint64_t start, uint64_t end, int back,
		      struct pscnv_mm_node **res)
{
	int flags = 0;

	if ((bo->flags & PSCNV_GEM_MEMTYPE_MASK) == PSCNV_GEM_VRAM_LARGE)
		flags |= PSCNV_MM_LP; /* assumed flag name */
	if (back)
		flags |= PSCNV_MM_FROMBACK;

	return pscnv_mm_alloc(vs->mm, bo->size, flags, start, end, res);
}

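/*
 * Write PTEs for a BO that has already been placed at `offset`.  SYSRAM
 * BOs are mapped page by page from bo->dmapages with small pages; VRAM
 * BOs walk their allocator region list and map each contiguous region,
 * splitting at VM block boundaries.
 */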
static int
nvc0_vspace_do_map(struct pscnv_vspace *vs,
		   struct pscnv_bo *bo, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	uint32_t pfl0, pfl1;
	struct pscnv_mm_node *reg;
	int i;

	pfl0 = 1; /* present */
	if (vs->vid >= 0 && (bo->flags & PSCNV_GEM_NOUSER))
		pfl0 |= 2; /* assumed: supervisor-only */

	pfl1 = bo->tile_flags << 4;

	switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		pfl1 |= 0x2; /* assumed: makes the sysram target non-snooping */
		/* fall through */
	case PSCNV_GEM_SYSRAM_SNOOP:
	{
		unsigned int pde = NVC0_PDE(offset);
		unsigned int pte = (offset & NVC0_VM_BLOCK_MASK) >> PAGE_SHIFT;
		struct nvc0_pgt *pt = nvc0_vspace_pgt(vs, pde);
		pfl1 |= 0x5; /* assumed: target = sysram */
		for (i = 0; i < (bo->size >> PAGE_SHIFT); ++i) {
			uint64_t phys = bo->dmapages[i];
			nv_wv32(pt->bo[1], pte * 8 + 4, pfl1);
			nv_wv32(pt->bo[1], pte * 8 + 0, (phys >> 8) | pfl0);
			pte++;
			if ((pte & (NVC0_VM_BLOCK_MASK >> PAGE_SHIFT)) == 0) {
				pte = 0;
				pt = nvc0_vspace_pgt(vs, ++pde);
			}
		}
		break;
	}
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		for (reg = bo->mmnode; reg; reg = reg->next) {
			uint32_t psh, psz;
			uint64_t phys = reg->start, size = reg->size;
			/* small pages for everything but VRAM_LARGE */
			int s = (bo->flags & PSCNV_GEM_MEMTYPE_MASK) != PSCNV_GEM_VRAM_LARGE;

			psh = s ? NVC0_SPAGE_SHIFT : NVC0_LPAGE_SHIFT;
			psz = 1 << psh;

			while (size) {
				struct nvc0_pgt *pt;
				int pte, count;
				uint32_t space;

				space = NVC0_VM_BLOCK_SIZE -
					(offset & NVC0_VM_BLOCK_MASK);
				if (space > size)
					space = size;
				size -= space;

				pte = (offset & NVC0_VM_BLOCK_MASK) >> psh;
				count = space >> psh;
				pt = nvc0_vspace_pgt(vs, NVC0_PDE(offset));

				write_pt(pt->bo[s], pte, count, phys, psz, pfl0, pfl1);

				offset += space;
				phys += space;
			}
		}
		break;
	default:
		WARN(1, "Should not be here! Mask %08x\n", bo->flags & PSCNV_GEM_MEMTYPE_MASK);
		return -EINVAL;
	}
	dev_priv->vm->bar_flush(vs->dev);
	return nvc0_tlb_flush(vs);
}

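/*
 * Per-vspace setup: the address space is capped at 40 bits (the width of
 * a GPU virtual address), a contiguous page directory is allocated and
 * cleared, the page table hash heads are initialized, and the address
 * allocator is brought up (0x1000 and 0x20000 look like the small- and
 * large-page allocation granularities).
 */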
static int nvc0_vspace_new(struct pscnv_vspace *vs) {
	int i, ret;

	if (vs->size > 1ull << 40)
		return -EINVAL;

	vs->engdata = kzalloc(sizeof(struct nvc0_vspace), GFP_KERNEL);
	if (!vs->engdata) {
		NV_ERROR(vs->dev, "VM: Couldn't alloc vspace eng\n");
		return -ENOMEM;
	}

	nvc0_vs(vs)->pd = pscnv_mem_alloc(vs->dev, NVC0_VM_PDE_COUNT * 8,
			PSCNV_GEM_CONTIG, 0, 0xdeadcafe);
	if (!nvc0_vs(vs)->pd) {
		kfree(vs->engdata);
		return -ENOMEM;
	}

	if (vs->vid != -3)
		nvc0_vm_map_kernel(nvc0_vs(vs)->pd);

	for (i = 0; i < NVC0_VM_PDE_COUNT; i++) {
		nv_wv32(nvc0_vs(vs)->pd, i * 8, 0);
		nv_wv32(nvc0_vs(vs)->pd, i * 8 + 4, 0);
	}

	for (i = 0; i < NVC0_PDE_HT_SIZE; ++i)
		INIT_LIST_HEAD(&nvc0_vs(vs)->ptht[i]);

	ret = pscnv_mm_init(vs->dev, 0, vs->size, 0x1000, 0x20000, 1, &vs->mm);
	if (ret) {
		pscnv_mem_free(nvc0_vs(vs)->pd);
		kfree(vs->engdata);
	}
	return ret;
}

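/* Tear down a vspace: drop every page table, then the page directory. */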
static void nvc0_vspace_free(struct pscnv_vspace *vs) {
	int i;

	for (i = 0; i < NVC0_PDE_HT_SIZE; i++) {
		struct nvc0_pgt *pgt, *save;
		list_for_each_entry_safe(pgt, save, &nvc0_vs(vs)->ptht[i], head)
			nvc0_pgt_del(vs, pgt);
	}
	pscnv_mem_free(nvc0_vs(vs)->pd);

	if (nvc0_vs(vs)->mmio_bo)
		pscnv_mem_free(nvc0_vs(vs)->mmio_bo);
	kfree(vs->engdata);
}

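/*
 * Map a BO into the BAR1 (user, framebuffer-sized) or BAR3 (kernel,
 * RAMIN-sized) vspace so it becomes CPU-visible through that BAR.
 */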
static int nvc0_vm_map_user(struct pscnv_bo *bo) {
	struct drm_nouveau_private *dev_priv = bo->dev->dev_private;
	struct nvc0_vm_engine *vme = nvc0_vm(dev_priv->vm);
	if (bo->map1)
		return 0;
	return pscnv_vspace_map(vme->bar1vm, bo, 0, dev_priv->fb_size, 0, &bo->map1);
}

static int nvc0_vm_map_kernel(struct pscnv_bo *bo) {
	struct drm_nouveau_private *dev_priv = bo->dev->dev_private;
	struct nvc0_vm_engine *vme = nvc0_vm(dev_priv->vm);
	if (bo->map3)
		return 0;
	return pscnv_vspace_map(vme->bar3vm, bo, 0, dev_priv->ramin_size, 0, &bo->map3);
}

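/*
 * Bring up the VM engine.  The BAR3 (RAMIN) vspace and channel are
 * created first so that kernel mappings work, then BAR1 for user
 * mappings.  The writes to 0x1714/0x1704 appear to bind the BAR3/BAR1
 * channel instance blocks; the 0x200 (PMC_ENABLE) dance pulses an engine
 * reset bit and 0x100c80 sets up VM/TLB control before any vspace exists.
 */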
int
nvc0_vm_init(struct drm_device *dev) {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_pgt *pt;
	struct nvc0_vm_engine *vme = kzalloc(sizeof *vme, GFP_KERNEL);
	if (!vme) {
		NV_ERROR(dev, "VM: Couldn't alloc engine\n");
		return -ENOMEM;
	}
	vme->base.takedown = nvc0_vm_takedown;
	vme->base.do_vspace_new = nvc0_vspace_new;
	vme->base.do_vspace_free = nvc0_vspace_free;
	vme->base.place_map = nvc0_vspace_place_map;
	vme->base.do_map = nvc0_vspace_do_map;
	vme->base.do_unmap = nvc0_vspace_do_unmap;
	vme->base.map_user = nvc0_vm_map_user;
	vme->base.map_kernel = nvc0_vm_map_kernel;
	vme->base.bar_flush = nv84_vm_bar_flush;
	dev_priv->vm = &vme->base;

	dev_priv->vm_ramin_base = 0;
	spin_lock_init(&dev_priv->vm->vs_lock);

	nv_wr32(dev, 0x200, 0xfffffeff);
	nv_wr32(dev, 0x200, 0xffffffff);

	nv_wr32(dev, 0x100c80, 0x00208000);

	vme->bar3vm = pscnv_vspace_new(dev, dev_priv->ramin_size, 0, 3);
	if (!vme->bar3vm) {
		kfree(vme);
		dev_priv->vm = 0;
		return -ENOMEM;
	}
	vme->bar3ch = pscnv_chan_new(dev, vme->bar3vm, 3);
	if (!vme->bar3ch) {
		pscnv_vspace_unref(vme->bar3vm);
		kfree(vme);
		dev_priv->vm = 0;
		return -ENOMEM;
	}

	nv_wr32(dev, 0x1714, 0xc0000000 | vme->bar3ch->bo->start >> 12);

	dev_priv->vm_ok = 1;

	nvc0_vm_map_kernel(vme->bar3ch->bo);
	nvc0_vm_map_kernel(nvc0_vs(vme->bar3vm)->pd);
	pt = nvc0_vspace_pgt(vme->bar3vm, 0);
	if (!pt) {
		NV_ERROR(dev, "VM: failed to allocate RAMIN page table\n");
		return -ENOMEM;
	}
	nvc0_vm_map_kernel(pt->bo[1]);

	vme->bar1vm = pscnv_vspace_new(dev, dev_priv->fb_size, 0, 1);
	if (!vme->bar1vm) {
		dev_priv->vm_ok = 0;
		pscnv_chan_unref(vme->bar3ch);
		pscnv_vspace_unref(vme->bar3vm);
		kfree(vme);
		dev_priv->vm = 0;
		return -ENOMEM;
	}
	vme->bar1ch = pscnv_chan_new(dev, vme->bar1vm, 1);
	if (!vme->bar1ch) {
		dev_priv->vm_ok = 0;
		pscnv_vspace_unref(vme->bar1vm);
		pscnv_chan_unref(vme->bar3ch);
		pscnv_vspace_unref(vme->bar3vm);
		kfree(vme);
		dev_priv->vm = 0;
		return -ENOMEM;
	}

	nv_wr32(dev, 0x1704, 0x80000000 | vme->bar1ch->bo->start >> 12);
	return 0;
}

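/*
 * Undo nvc0_vm_init: unbind the BARs (zeroing 0x1704/0x1714/0x1718),
 * then drop the BAR channels and vspaces.
 */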
static void
nvc0_vm_takedown(struct drm_device *dev) {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_vm_engine *vme = nvc0_vm(dev_priv->vm);

	dev_priv->vm_ok = 0;
	nv_wr32(dev, 0x1704, 0);
	nv_wr32(dev, 0x1714, 0);
	nv_wr32(dev, 0x1718, 0);
	pscnv_chan_unref(vme->bar1ch);
	pscnv_vspace_unref(vme->bar1vm);
	pscnv_chan_unref(vme->bar3ch);
	pscnv_vspace_unref(vme->bar3vm);
	kfree(vme);
	dev_priv->vm = 0;
}