/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/timer.h>
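
/*
 * NV44 packs four page-table entries into each 16-byte group: judging by
 * the shift/mask pattern below, each PTE is 27 bits wide, so the four
 * entries straddle the group's four 32-bit words, and bit 30 of the last
 * word (0x40000000) appears to mark the group as valid.  Updating fewer
 * than four entries therefore requires a read-modify-write of the whole
 * group, which is what nv44_vmm_pgt_fill() implements.
 */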
static void
nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		  dma_addr_t *list, u32 ptei, u32 ptes)
{
	u32 pteo = (ptei << 2) & ~0x0000000f;
	u32 tmp[4];

	tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
	tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
	tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
	tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);

	while (ptes--) {
		u32 addr = (list ? *list++ : vmm->null) >> 12;
		switch (ptei++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
	VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
	VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
	VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
}
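
/*
 * Write ptes entries starting at ptei, with addresses taken from a
 * contiguous range beginning at addr.  Unaligned head and tail entries
 * go through the read-modify-write helper above; aligned groups of four
 * are packed and written directly.
 */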
static void
nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	dma_addr_t tmp[4], i;

	/* Head: entries up to the next group boundary. */
	if (ptei & 3) {
		const u32 pten = min(ptes, 4 - (ptei & 3));
		for (i = 0; i < pten; i++, addr += 0x1000)
			tmp[i] = addr;
		nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
		ptei += pten;
		ptes -= pten;
	}

	/* Bulk: whole groups of four entries, packed and written directly. */
	while (ptes >= 4) {
		for (i = 0; i < 4; i++, addr += 0x1000)
			tmp[i] = addr >> 12;
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
		ptes -= 4;
	}

	/* Tail: any remaining partial group. */
	if (ptes) {
		for (i = 0; i < ptes; i++, addr += 0x1000)
			tmp[i] = addr;
		nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
	}
}

static void
nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
}
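
/*
 * For DMA mappings with 4KiB kernel pages, the page list in map->dma can
 * be consumed directly using the same head/bulk/tail split as
 * nv44_vmm_pgt_pte(); otherwise, fall back to the generic per-page
 * iterator.
 */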
static void
nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
	nvkm_kmap(pt->memory);
	if (ptei & 3) {
		const u32 pten = min(ptes, 4 - (ptei & 3));
		nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
		ptei += pten;
		ptes -= pten;
		map->dma += pten;
	}

	while (ptes >= 4) {
		u32 tmp[4], i;
		for (i = 0; i < 4; i++)
			tmp[i] = *map->dma++ >> 12;
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
		ptes -= 4;
	}

	if (ptes) {
		nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
		map->dma += ptes;
	}
	nvkm_done(pt->memory);
#else
	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
#endif
}
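
/*
 * On unmap, entries in partially-cleared groups are pointed at the dummy
 * page: nv44_vmm_pgt_fill() substitutes vmm->null when passed a NULL
 * list.  Whole groups can simply be zeroed.
 */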
static void
nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	nvkm_kmap(pt->memory);
	if (ptei & 3) {
		const u32 pten = min(ptes, 4 - (ptei & 3));
		nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
		ptei += pten;
		ptes -= pten;
	}

	while (ptes >= 4) {
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
		ptes -= 4;
	}

	if (ptes)
		nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
	nvkm_done(pt->memory);
}

static const struct nvkm_vmm_desc_func
nv44_vmm_desc_pgt = {
	.unmap = nv44_vmm_pgt_unmap,
	.dma = nv44_vmm_pgt_dma,
	.sgl = nv44_vmm_pgt_sgl,
};

static const struct nvkm_vmm_desc
nv44_vmm_desc_12[] = {
	{ PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
	{}
};
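
/*
 * TLB flush: write the address limit to 0x100814, trigger the flush via
 * 0x100808, and poll for up to 2ms until the completion bit is set
 * before clearing the trigger.
 */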
static void
nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;
	nvkm_wr32(device, 0x100814, vmm->limit - 4096);
	nvkm_wr32(device, 0x100808, 0x000000020);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100808) & 0x00000001)
			break;
	);
	nvkm_wr32(device, 0x100808, 0x00000000);
}

static const struct nvkm_vmm_func
nv44_vmm = {
	.valid = nv04_vmm_valid,
	.flush = nv44_vmm_flush,
	.page = {
		{ 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
		{}
	}
};
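
/*
 * Constructor: create the VMM, then allocate the coherent dummy page
 * backing vmm->null, which entries in partially-unmapped PTE groups
 * reference instead of arbitrary memory.
 */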
int
nv44_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	struct nvkm_subdev *subdev = &mmu->subdev;
	struct nvkm_vmm *vmm;
	int ret;

	ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, managed, addr, size,
			    argv, argc, key, name, &vmm);
	*pvmm = vmm;
	if (ret)
		return ret;

	vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
					&vmm->null, GFP_KERNEL);
	if (!vmm->nullp) {
		nvkm_warn(subdev, "unable to allocate dummy pages\n");
		vmm->null = 0;
	}

	return 0;
}