/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/if500d.h>
#include <nvif/unpack.h>

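/* Write a run of 64-bit PTEs.  Runs of physically-contiguous pages are
 * merged into the largest naturally-aligned power-of-two block available,
 * with log2 of the block size encoded into bits 9:7 of each PTE.
 */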
static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 next = addr + map->type, data;
	u32 pten;
	int log2blk;

	map->type += ptes * map->ctag;

	while (ptes) {
		for (log2blk = 7; log2blk >= 0; log2blk--) {
			pten = 1 << log2blk;
			if (ptes >= pten && IS_ALIGNED(ptei, pten))
				break;
		}

		data  = next | (log2blk << 7);
		next += pten * map->next;
		ptes -= pten;

		while (pten--)
			VMM_WO064(pt, vmm, ptei++ * 8, data);
	}
}

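/* Backing-store iterators: walk a scatter-gather list, a DMA address
 * array, or an nvkm_memory object and emit PTEs via nv50_vmm_pgt_pte().
 * The DMA path short-circuits for PAGE_SIZE pages and writes each PTE
 * directly from the DMA address array.
 */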
static void
nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

static void
nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = *map->dma++ + map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

static void
nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

static void
nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgt = {
	.unmap = nv50_vmm_pgt_unmap,
	.mem = nv50_vmm_pgt_mem,
	.dma = nv50_vmm_pgt_dma,
	.sgl = nv50_vmm_pgt_sgl,
};

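/* Build a page-directory entry for a page table.  Bits 1:0 select the
 * page size (large/small), bits 6:5 encode the allocation size of a
 * small-page table, and bits 3:2 the memory target holding the table;
 * a recognisable 0xdeadcafe pattern is written when no table exists.
 */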
static bool
nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
{
	struct nvkm_mmu_pt *pt;
	u64 data = 0xdeadcafe00000000ULL;
	if (pgt && (pt = pgt->pt[0])) {
		switch (pgt->page) {
		case 16: data = 0x00000001; break;
		case 12: data = 0x00000003;
			switch (nvkm_memory_size(pt->memory)) {
			case 0x100000: data |= 0x00000000; break;
			case 0x040000: data |= 0x00000020; break;
			case 0x020000: data |= 0x00000040; break;
			case 0x010000: data |= 0x00000060; break;
			default:
				WARN_ON(1);
				return false;
			}
			break;
		default:
			WARN_ON(1);
			return false;
		}

		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
		case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
		case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
		default:
			WARN_ON(1);
			return false;
		}

		data |= pt->addr;
	}

	*pdata = data;
	return true;
}

static void
nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_join *join;
	u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
	u64 data;

	if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
		return;

	list_for_each_entry(join, &vmm->join, head) {
		nvkm_kmap(join->inst);
		nvkm_wo64(join->inst, pdeo, data);
		nvkm_done(join->inst);
	}
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgd = {
	.pde = nv50_vmm_pgd_pde,
};

const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
	{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
	{}
};

const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
	{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
	{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
	{}
};

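/* Invalidate the TLBs of every engine currently holding a reference to
 * this VMM, one engine at a time, through the 0x100c80 invalidate
 * register; GR gets a dedicated flush as a hardware-bug workaround.
 */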
void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
	struct nvkm_device *device = subdev->device;
	int i, id;

	mutex_lock(&subdev->mutex);
	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
		if (!atomic_read(&vmm->engref[i]))
			continue;

		/* unfortunate hw bug workaround... */
		if (i == NVKM_ENGINE_GR && device->gr) {
			int ret = nvkm_gr_tlb_flush(device->gr);
			if (ret != -ENODEV)
				continue;
		}

		switch (i) {
		case NVKM_ENGINE_GR    : id = 0x00; break;
		case NVKM_ENGINE_VP    :
		case NVKM_ENGINE_MSPDEC: id = 0x01; break;
		case NVKM_SUBDEV_BAR   : id = 0x06; break;
		case NVKM_ENGINE_MSPPP :
		case NVKM_ENGINE_MPEG  : id = 0x08; break;
		case NVKM_ENGINE_BSP   :
		case NVKM_ENGINE_MSVLD : id = 0x09; break;
		case NVKM_ENGINE_CIPHER:
		case NVKM_ENGINE_SEC   : id = 0x0a; break;
		case NVKM_ENGINE_CE0   : id = 0x0d; break;
		default:
			continue;
		}

		nvkm_wr32(device, 0x100c80, (id << 16) | 1);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
				break;
		) < 0)
			nvkm_error(subdev, "%s mmu invalidate timeout\n",
				   nvkm_subdev_name[i]);
	}
	mutex_unlock(&subdev->mutex);
}

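/* Validate a map request and pre-compute the PTE template (map->type):
 * valid bit, read-only, aperture, privileged, storage kind and, where
 * requested and supported, compression tags.
 */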
int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
	       struct nvkm_vmm_map *map)
{
	const struct nvkm_vmm_page *page = map->page;
	union {
		struct nv50_vmm_map_vn vn;
		struct nv50_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_ram *ram = device->fb->ram;
	struct nvkm_memory *memory = map->memory;
	u8  aper, kind, kind_inv, comp, priv, ro;
	int kindn, ret = -ENOSYS;
	const u8 *kindm;

	map->type = map->ctag = 0;
	map->next = 1 << page->shift;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind = args->v0.kind & 0x7f;
		comp = args->v0.comp & 0x03;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		ro   = 0;
		priv = 0;
		kind = 0x00;
		comp = 0;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	switch (nvkm_memory_target(memory)) {
	case NVKM_MEM_TARGET_VRAM:
		if (ram->stolen) {
			map->type |= ram->stolen;
			aper = 3;
		} else {
			aper = 0;
		}
		break;
	case NVKM_MEM_TARGET_HOST:
		aper = 2;
		break;
	case NVKM_MEM_TARGET_NCOH:
		aper = 3;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (map->mem && map->mem->type != kindm[kind]) {
		VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
			  kindm[kind], map->mem->type);
		return -EINVAL;
	}

	if (comp) {
		u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		ret = nvkm_memory_tags_get(memory, device, tags, NULL,
					   &map->tags);
		if (ret) {
			VMM_DEBUG(vmm, "comp %d", ret);
			return ret;
		}

		if (map->tags->mn) {
			u32 tags = map->tags->mn->offset + (map->offset >> 16);
			map->ctag |= (u64)comp << 49;
			map->type |= (u64)comp << 47;
			map->type |= (u64)tags << 49;
			map->next |= map->ctag;
		}
	}

	map->type |= BIT(0); /* Valid. */
	map->type |= (u64)ro << 3;
	map->type |= (u64)aper << 4;
	map->type |= (u64)priv << 6;
	map->type |= (u64)kind << 40;
	return 0;
}

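/* Each channel that uses this VMM registers its instance block on the
 * vmm->join list; nv50_vmm_pgd_pde() keeps the page directory copied
 * into every joined instance block up to date.
 */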
void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	struct nvkm_vmm_join *join;

	list_for_each_entry(join, &vmm->join, head) {
		if (join->inst == inst) {
			list_del(&join->head);
			kfree(join);
			break;
		}
	}
}

int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
	struct nvkm_vmm_join *join;
	int ret = 0;
	u64 data;
	u32 pdei;

	if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
		return -ENOMEM;
	join->inst = inst;
	list_add_tail(&join->head, &vmm->join);

	nvkm_kmap(join->inst);
	for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
		if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
			ret = -EINVAL;
			break;
		}
		nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
	}
	nvkm_done(join->inst);
	return ret;
}

static const struct nvkm_vmm_func
nv50_vmm = {
	.join = nv50_vmm_join,
	.part = nv50_vmm_part,
	.valid = nv50_vmm_valid,
	.flush = nv50_vmm_flush,
	.page_block = 1 << 29,
	.page = {
		{ 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};

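/* Instantiate an NV50-family VMM through the shared nv04 constructor,
 * using the function table above.
 */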
int
nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
			     argv, argc, key, name, pvmm);
}