/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
27 #include "nouveau_drv.h"
28 #include "nouveau_vm.h"
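
/* Write the two page directory entries for a single PDE index.  Each
 * index covers an 8-byte pair, one entry per page table (one for each
 * of the two supported page sizes).  Bit 0 marks an entry present; the
 * page table's instance address is stored in 256-byte units (>> 8).
 */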
void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
		struct nouveau_gpuobj *pgt[2])
{
	u32 pde[2] = { 0, 0 };

	if (pgt[0])
		pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
	if (pgt[1])
		pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);

	nv_wo32(pgd, (index * 8) + 0, pde[0]);
	nv_wo32(pgd, (index * 8) + 4, pde[1]);
}
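
/* Build the 64-bit PTE value for a physical address.  The address is
 * stored in 256-byte units; bit 0 marks the entry present, and bit 1
 * is set when the VMA allows NV_MEM_ACCESS_SYS.  The target (aperture)
 * occupies bits 32-35 and the memory/storage type sits above that.
 */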
static inline u64
nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys >>= 8;

	phys |= 0x00000001; /* present */
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= 0x00000002;

	phys |= ((u64)target  << 32);
	phys |= ((u64)memtype << 36);

	return phys;
}
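
/* Map a contiguous VRAM allocation.  "next" is the page size in
 * 256-byte units, derived from the node's page shift, so the PTE
 * address field can be advanced directly; target 0 is used for VRAM.
 * "delta" is unused here but belongs to the common backend interface.
 */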
void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	u32 next = 1 << (vma->node->type - 8);

	phys  = nvc0_vm_addr(vma, phys, mem->memtype, 0);
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		phys += next;
		pte  += 8;
	}
}
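
/* Map a list of DMA addresses (system-memory pages), one PTE each.
 * Target 5 is used instead of 0, presumably selecting snooped system
 * RAM rather than VRAM.
 */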
void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte <<= 3;
	while (cnt--) {
		u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}
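
/* Unmap a range by zeroing both words of each PTE, clearing the
 * present bit.
 */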
void
nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}
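
/* Flush the VM TLBs after page table updates.  The page directory's
 * instance address is written to 0x100cb8 and the flush is kicked off
 * through 0x100cbc, once for each PGD attached to this address space.
 */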
void
nvc0_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct drm_device *dev = vm->dev;
	struct nouveau_vm_pgd *vpgd;
	unsigned long flags;
	u32 engine;

	engine = 1;
	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
		engine |= 4;

	pinstmem->flush(vm->dev);

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* looks like maybe a "free flush slots" counter, the
		 * faster you write to 0x100cbc the more it decreases
		 */
		if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
			NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
				 nv_rd32(dev, 0x100c80), engine);
		}
		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
		/* wait for flush to be queued? */
		if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
			NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
				 nv_rd32(dev, 0x100c80), engine);
		}
	}
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}