/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/timer.h>

#include <nvif/if900d.h>
#include <nvif/unpack.h>
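
/* GF100 PTE layout, as inferred from the shifts used by gf100_vmm_pgt_pte()
 * and gf100_vmm_valid() below (not from documentation):
 *
 *   bit   0:    valid
 *   bit   1:    privileged
 *   bit   2:    read-only
 *   bit  32:    volatile (host memory)
 *   bits 33:34: aperture
 *   bits 36+:   kind
 *   bits 44+:   compression tag
 *
 * map->type carries everything except the page address, which is inserted
 * per-PTE as (addr >> 8).
 */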
static inline void
gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 base = (addr >> 8) | map->type;
	u64 data = base;

	if (map->ctag && !(map->next & (1ULL << 44))) {
		while (ptes--) {
			data = base | ((map->ctag >> 1) << 44);
			if (!(map->ctag++ & 1))
				data |= BIT_ULL(60);

			VMM_WO064(pt, vmm, ptei++ * 8, data);
			base += map->next;
		}
	} else {
		map->type += ptes * map->ctag;

		while (ptes--) {
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			data += map->next;
		}
	}
}
void
gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}
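
/* For DMA-mapped pages, the fast path below writes PTEs straight from the
 * map->dma address array when the MMU page size matches the CPU page size;
 * otherwise the generic iterator falls back to gf100_vmm_pgt_pte().
 */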
void
gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	if (map->page->shift == PAGE_SHIFT) {
		VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
		nvkm_kmap(pt->memory);
		while (ptes--) {
			const u64 data = (*map->dma++ >> 8) | map->type;
			VMM_WO064(pt, vmm, ptei++ * 8, data);
			map->type += map->ctag;
		}
		nvkm_done(pt->memory);
		return;
	}

	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}
void
gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
}
void
gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		    struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}
const struct nvkm_vmm_desc_func
gf100_vmm_pgt = {
	.unmap = gf100_vmm_pgt_unmap,
	.mem = gf100_vmm_pgt_mem,
	.dma = gf100_vmm_pgt_dma,
	.sgl = gf100_vmm_pgt_sgl,
};
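
/* Each GF100 PDE appears to pack two page-table pointers, one per halfword:
 * pgt->pt[0] in the low 32 bits ((addr >> 8) | target, VOL at bit 35) and
 * pgt->pt[1] in the high 32 bits (target at 32:33, VOL at bit 34).  The
 * whole PDE is repacked whenever either table changes.
 */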
void
gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
	struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
	struct nvkm_mmu_pt *pd = pgd->pt[0];
	struct nvkm_mmu_pt *pt;
	u64 data = 0;

	if ((pt = pgt->pt[0])) {
		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
			data |= BIT_ULL(35); /* VOL */
			break;
		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
		default:
			WARN_ON(1);
			return;
		}
		data |= pt->addr >> 8;
	}

	if ((pt = pgt->pt[1])) {
		switch (nvkm_memory_target(pt->memory)) {
		case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
		case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
			data |= BIT_ULL(34); /* VOL */
			break;
		case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
		default:
			WARN_ON(1);
			return;
		}
		data |= pt->addr << 24;
	}

	nvkm_kmap(pd->memory);
	VMM_WO064(pd, vmm, pdei * 8, data);
	nvkm_done(pd->memory);
}
const struct nvkm_vmm_desc_func
gf100_vmm_pgd = {
	.unmap = gf100_vmm_pgt_unmap,
	.pde = gf100_vmm_pgd_pde,
};
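
/* Page-table layout descriptors, leaf level first.  Going by how these
 * fields are consumed by the common vmm code, each entry appears to be
 * { level type, VMA bits indexed, bytes per entry, alignment, ops }: e.g.
 * { SPT, 15, 8, 0x1000, ... } describes a small-page table indexed by 15
 * address bits with 8-byte PTEs.  The _17_* variants pair with 128KiB big
 * pages, the _16_* variants with 64KiB.
 */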
static const struct nvkm_vmm_desc
gf100_vmm_desc_17_12[] = {
	{ SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_17_17[] = {
	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_12[] = {
	{ SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
	{}
};

static const struct nvkm_vmm_desc
gf100_vmm_desc_16_16[] = {
	{ LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
	{ PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
	{}
};
void
gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	nvkm_wr32(device, 0x100cb8, addr);
}
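
/* TLB invalidation sequence: wait for a free flush slot, point the MMU at
 * the page directory (unless flushing all PDBs), then trigger the flush via
 * 0x100cbc and wait for it to be accepted.  The register semantics are
 * reverse-engineered guesses, per the comments below.
 */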
void
gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type)
{
	struct nvkm_subdev *subdev = &vmm->mmu->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
	u64 addr = 0;

	mutex_lock(&subdev->mutex);
	/* Looks like maybe a "free flush slots" counter, the
	 * faster you write to 0x100cbc the more it decreases.
	 */
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
			break;
	);

	if (!(type & 0x00000002) /* ALL_PDB. */) {
		switch (nvkm_memory_target(pd->memory)) {
		case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break;
		case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break;
		case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break;
		default:
			WARN_ON(1);
			break;
		}
		addr |= (vmm->pd->pt[0]->addr >> 12) << 4;

		vmm->func->invalidate_pdb(vmm, addr);
	}

	nvkm_wr32(device, 0x100cbc, 0x80000000 | type);

	/* Wait for flush to be queued? */
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
			break;
	);
	mutex_unlock(&subdev->mutex);
}
void
gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
	u32 type = 0x00000001; /* PAGE_ALL */
	if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */
	gf100_vmm_invalidate(vmm, type);
}
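
/* Validate map arguments and precompute the PTE template: map->type
 * accumulates the fields shared by every PTE in the mapping (valid, priv,
 * ro, vol, aperture, kind, base compression tag), map->next is the per-PTE
 * address increment, and map->ctag the per-PTE comptag increment.
 */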
int
gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
		struct nvkm_vmm_map *map)
{
	const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
	const struct nvkm_vmm_page *page = map->page;
	const bool gm20x = page->desc->func->sparse != NULL;
	union {
		struct gf100_vmm_map_vn vn;
		struct gf100_vmm_map_v0 v0;
	} *args = argv;
	struct nvkm_device *device = vmm->mmu->subdev.device;
	struct nvkm_memory *memory = map->memory;
	u8  kind, kind_inv, priv, ro, vol;
	int kindn, aper, ret = -ENOSYS;
	const u8 *kindm;

	map->next = (1 << page->shift) >> 8;
	map->type = map->ctag = 0;

	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
		vol  = !!args->v0.vol;
		ro   = !!args->v0.ro;
		priv = !!args->v0.priv;
		kind =   args->v0.kind;
	} else
	if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
		vol  = target == NVKM_MEM_TARGET_HOST;
		ro   = 0;
		priv = 0;
		kind = 0x00;
	} else {
		VMM_DEBUG(vmm, "args");
		return ret;
	}

	aper = vmm->func->aper(target);
	if (WARN_ON(aper < 0))
		return aper;

	kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv);
	if (kind >= kindn || kindm[kind] == kind_inv) {
		VMM_DEBUG(vmm, "kind %02x", kind);
		return -EINVAL;
	}

	if (kindm[kind] != kind) {
		u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
		u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
		if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
			VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
			return -EINVAL;
		}

		ret = nvkm_memory_tags_get(memory, device, tags,
					   nvkm_ltc_tags_clear,
					   &map->tags);
		if (ret) {
			VMM_DEBUG(vmm, "comp %d", ret);
			return ret;
		}

		if (map->tags->mn) {
			u64 tags = map->tags->mn->offset + (map->offset >> 17);
			if (page->shift == 17 || !gm20x) {
				map->type |= tags << 44;
				map->ctag |= 1ULL << 44;
				map->next |= 1ULL << 44;
			} else {
				map->ctag |= tags << 1 | 1;
			}
		} else {
			kind = kindm[kind];
		}
	}

	map->type |= BIT(0); /* Valid. */
	map->type |= (u64)priv << 1;
	map->type |= (u64)  ro << 2;
	map->type |= (u64) vol << 32;
	map->type |= (u64)aper << 33;
	map->type |= (u64)kind << 36;
	return 0;
}
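
/* Translate a memory target to the 2-bit aperture code used in PTEs. */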
int
gf100_vmm_aper(enum nvkm_memory_target target)
{
	switch (target) {
	case NVKM_MEM_TARGET_VRAM: return 0;
	case NVKM_MEM_TARGET_HOST: return 2;
	case NVKM_MEM_TARGET_NCOH: return 3;
	default:
		return -EINVAL;
	}
}
void
gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	nvkm_fo64(inst, 0x0200, 0x00000000, 2);
}
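
/* Attach the VMM to an instance block: 0x0200 holds the page directory
 * address plus target/VOL bits, 0x0208 the address limit.
 */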
int
gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
{
	struct nvkm_mmu_pt *pd = vmm->pd->pt[0];

	switch (nvkm_memory_target(pd->memory)) {
	case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
	case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
		base |= BIT_ULL(2) /* VOL. */;
		break;
	case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}
	base |= pd->addr;

	nvkm_kmap(inst);
	nvkm_wo64(inst, 0x0200, base);
	nvkm_wo64(inst, 0x0208, vmm->limit - 1);
	nvkm_done(inst);
	return 0;
}
int
gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	return gf100_vmm_join_(vmm, inst, 0);
}
static const struct nvkm_vmm_func
gf100_vmm_17 = {
	.join = gf100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.invalidate_pdb = gf100_vmm_invalidate_pdb,
	.page = {
		{ 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};
static const struct nvkm_vmm_func
gf100_vmm_16 = {
	.join = gf100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.invalidate_pdb = gf100_vmm_invalidate_pdb,
	.page = {
		{ 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};
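
/* Select the 64KiB- or 128KiB-big-page VMM variant to match the VRAM page
 * size the FB code settled on (fb->page is log2: 16 or 17).
 */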
int
gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
	       const struct nvkm_vmm_func *func_17,
	       struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	       void *argv, u32 argc, struct lock_class_key *key,
	       const char *name, struct nvkm_vmm **pvmm)
{
	switch (mmu->subdev.device->fb->page) {
	case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size,
				      argv, argc, key, name, pvmm);
	case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size,
				      argv, argc, key, name, pvmm);
	default:
		WARN_ON(1);
		return -EINVAL;
	}
}
int
gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	      void *argv, u32 argc, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
			      size, argv, argc, key, name, pvmm);
}