/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_vm.h"
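/*
 * Fill in the 8-byte page directory entry for 'pde'.  pgt[0] is the
 * small-page (4KiB) page table for this PDE, pgt[1] the large-page one
 * (64KiB, going by the shift).  The extra low bits derived from
 * 'coverage' appear to tell the hardware how much address space the
 * table spans.
 */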
void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		struct nouveau_gpuobj *pgt[2])
{
	u64 phys = 0xdeadcafe00000000ULL;
	u32 coverage = 0;

	if (pgt[0]) {
		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
		coverage = (pgt[0]->size >> 3) << 12;
	} else
	if (pgt[1]) {
		phys = 0x00000001 | pgt[1]->vinst; /* present */
		coverage = (pgt[1]->size >> 3) << 16;
	}

	if (phys & 1) {
		if (coverage <= 32 * 1024 * 1024)
			phys |= 0x60;
		else if (coverage <= 64 * 1024 * 1024)
			phys |= 0x40;
		else if (coverage <= 128 * 1024 * 1024)
			phys |= 0x20;
	}

	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
}
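/*
 * Assemble a PTE from a physical address: bit 0 marks the entry present,
 * bits 4:5 select the memory target, and the memtype lands in the high
 * bits.  Read-only and system-coherent mappings are flagged from
 * vma->access.
 */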
static inline u64
vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys |= 1; /* present */
	phys |= (u64)memtype << 40;
	phys |= target << 4;
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= (1 << 6);
	if (!(vma->access & NV_MEM_ACCESS_WO))
		phys |= (1 << 3); /* read-only */
	return phys;
}
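/*
 * Map 'cnt' contiguous pages of VRAM starting at 'phys'.  PTEs are
 * written in power-of-two blocks: the loop below picks the largest
 * aligned block (up to 128 PTEs) that still fits in 'cnt', encodes the
 * block size in bits 7:9 of the PTE, and repeats that PTE across the
 * block.  When the memory is compressed (comp != 0), a compression tag
 * derived from 'delta' is folded into the upper half.
 */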
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
	u32 comp = (mem->memtype & 0x180) >> 7;
	u32 block, target;
	int i;

	/* IGPs don't have real VRAM, re-target to stolen system memory */
	target = 0;
	if (dev_priv->vram_sys_base) {
		phys += dev_priv->vram_sys_base;
		target = 3;
	}

	phys  = vm_addr(vma, phys, mem->memtype, target);
	pte <<= 3;
	cnt <<= 3;

	while (cnt) {
		u32 offset_h = upper_32_bits(phys);
		u32 offset_l = lower_32_bits(phys);

		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 3);
			if (cnt >= block && !(pte & (block - 1)))
				break;
		}
		offset_l |= (i << 7);

		phys += block << (vma->node->type - 3);
		cnt  -= block;
		if (comp) {
			u32 tag = mem->tag->start + ((delta >> 16) * comp);
			offset_h |= (tag << 17);
			delta    += block << (vma->node->type - 3);
		}

		while (block) {
			nv_wo32(pgt, pte + 0, offset_l);
			nv_wo32(pgt, pte + 4, offset_h);
			pte += 8;
			block -= 8;
		}
	}
}
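/*
 * Map a scatter/gather list of system-memory pages, one PTE per entry,
 * with no block-size encoding.  NOSNOOP mappings select target 3 rather
 * than the default 2.
 */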
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
	pte <<= 3;
	while (cnt--) {
		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}
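/* Invalidate 'cnt' PTEs by zeroing both 32-bit halves of each entry. */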
void
nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}
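/*
 * Flush pending instmem writes, then invalidate the TLBs of every engine
 * holding a reference on this VM.  The BAR1/BAR3 VMs only ever touch
 * engine 6, so they take the short path.
 */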
void
nv50_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	int i;

	pinstmem->flush(vm->dev);

	/* BAR */
	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
		nv50_vm_flush_engine(vm->dev, 6);
		return;
	}

	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (atomic_read(&vm->engref[i]))
			dev_priv->eng[i]->tlb_flush(vm->dev, i);
	}
}
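/*
 * Poke the 0x100c80 TLB-flush register for a single engine and spin
 * until the busy bit (bit 0) clears, under the VM lock since the
 * register is shared by every VM on the card.
 */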
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}