/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_vm.h"
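/* Write the page directory entry for PDE 'pde': point it at the 4KiB-page
 * table in pgt[0] if one exists, otherwise at the large-page table in
 * pgt[1], and encode how much address space that table covers.
 */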
void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		struct nouveau_gpuobj *pgt[2])
{
	u64 phys = 0xdeadcafe00000000ULL;
	u32 coverage = 0;

	if (pgt[0]) {
		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
		coverage = (pgt[0]->size >> 3) << 12;
	} else
	if (pgt[1]) {
		phys = 0x00000001 | pgt[1]->vinst; /* present */
		coverage = (pgt[1]->size >> 3) << 16;
	}

	if (phys & 1) {
		if (coverage <= 32 * 1024 * 1024)
			phys |= 0x60;
		else if (coverage <= 64 * 1024 * 1024)
			phys |= 0x40;
		else if (coverage < 128 * 1024 * 1024)
			phys |= 0x20;
	}

	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
}
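/* Turn a physical address into a PTE value: mark it present, merge in the
 * memory type and target, redirect VRAM addresses to stolen system memory
 * on IGPs, and OR in extra flag bits derived from vma->access
 * (NV_MEM_ACCESS_SYS, NV_MEM_ACCESS_WO).
 */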
static inline u64
nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;

	phys |= 1; /* present */
	phys |= (u64)memtype << 40;

	/* IGPs don't have real VRAM, re-target to stolen system memory */
	if (target == 0 && dev_priv->vram_sys_base) {
		phys  += dev_priv->vram_sys_base;
		target = 3;
	}

	phys |= target << 4;

	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= (1 << 6);

	if (!(vma->access & NV_MEM_ACCESS_WO))
		phys |= (1 << 3);

	return phys;
}
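/* Map a linear VRAM allocation.  Each pass selects the largest
 * power-of-two block of PTEs that the remaining count and the current
 * alignment allow, records the block size in the low PTE word, adds
 * compression tags when mem->memtype asks for them, and writes the same
 * PTE pair into every slot covered by the block.
 */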
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	u32 comp = (mem->memtype & 0x180) >> 7;
	u32 block;
	int i;

	phys  = nv50_vm_addr(vma, phys, mem->memtype, 0);
	pte <<= 3;
	cnt <<= 3;

	while (cnt) {
		u32 offset_h = upper_32_bits(phys);
		u32 offset_l = lower_32_bits(phys);

		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 3);
			if (cnt >= block && !(pte & (block - 1)))
				break;
		}
		offset_l |= (i << 7);

		phys += block << (vma->node->type - 3);
		cnt  -= block;
		if (comp) {
			u32 tag = mem->tag->start + ((delta >> 16) * comp);
			offset_h |= (tag << 17);
			delta    += block << (vma->node->type - 3);
		}

		while (block) {
			nv_wo32(pgt, pte + 0, offset_l);
			nv_wo32(pgt, pte + 4, offset_h);
			pte += 8;
			block -= 8;
		}
	}
}
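/* Map a scatter/gather list of DMA addresses (target 2, system memory):
 * one PTE per page, no block-size or compression encoding.
 */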
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte <<= 3;
	while (cnt--) {
		u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}
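/* Clear 'cnt' page table entries starting at 'pte', unmapping the pages. */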
void
nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}
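/* Flush pending page table writes, then the relevant TLBs: the BAR engine
 * for the BAR1/BAR3 VMs, otherwise the FIFO plus every engine that holds a
 * reference on this VM.
 */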
void
nv50_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	pinstmem->flush(vm->dev);

	/* BAR */
	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
		nv50_vm_flush_engine(vm->dev, 6);
		return;
	}

	pfifo->tlb_flush(vm->dev);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (atomic_read(&vm->engref[i]))
			dev_priv->eng[i]->tlb_flush(vm->dev, i);
	}
}
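/* Kick a VM TLB flush for one engine through register 0x100c80 and wait
 * for the busy bit to clear, logging an error on timeout.
 */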
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}