// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

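/*
 * The MMUv2 pagetable is two levels deep: a 1024-entry master TLB (MTLB)
 * whose entries each cover 4MB of the 4GB GPU address space, and one
 * 1024-entry slave TLB (STLB) of 4K page entries per present MTLB slot.
 * The split follows directly from the mask/shift values above.
 */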
struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;
	/* P(age) T(able) A(rray) */
	u64 *pta_cpu;
	dma_addr_t pta_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

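/* Lazily allocate and hook up the second-level pagetable for one MTLB slot. */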
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
			    int stlb)
{
	if (etnaviv_domain->stlb_cpu[stlb])
		return 0;

	etnaviv_domain->stlb_cpu[stlb] =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!etnaviv_domain->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
					 MMUv2_PTE_PRESENT;

	return 0;
}

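/*
 * Map a single 4K page: split the IOVA into MTLB/STLB indices, make sure the
 * STLB backing that MTLB slot exists, then write the PTE.
 */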
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
	if (ret)
		return ret;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

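/* Unmap a single 4K page by putting the exception PTE back in place. */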
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return 0;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

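/*
 * Allocate the DMA-coherent backing for the domain: the scratch (bad) page
 * used as the MMU safe address target, the PTA and the first-level MTLB.
 * STLBs are allocated on demand in etnaviv_iommuv2_ensure_stlb().
 */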
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	int ret;

	/* allocate scratch page */
	etnaviv_domain->base.bad_page_cpu =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->base.bad_page_dma,
				     GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
					       SZ_4K, &etnaviv_domain->pta_dma,
					       GFP_KERNEL);
	if (!etnaviv_domain->pta_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
						SZ_4K, &etnaviv_domain->mtlb_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->base.bad_page_cpu,
			    etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->pta_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	return ret;
}

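/* Tear down all pagetable memory, including any STLBs that were allocated. */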
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->base.bad_page_cpu,
		    etnaviv_domain->base.bad_page_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
				    etnaviv_domain->stlb_cpu[i],
				    etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}

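/* Size of a pagetable dump: the MTLB plus one page per present STLB. */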
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

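/*
 * Copy the MTLB followed by every present STLB into the dump buffer; the
 * buffer only advances for present entries, matching the size computed by
 * etnaviv_iommuv2_dump_size() above.
 */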
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}

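/*
 * Program the non-secure MMU: hand the MTLB and bad page addresses to the
 * hardware via an FE command stream, then flip the enable bit.
 */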
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

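/*
 * Program the MMU in security-state aware mode: the MTLB address is loaded
 * indirectly through entry 0 of the PTA, and the bad page is installed as
 * the safe address for both the secure and non-secure sides.
 */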
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

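/* Restore MMU state after a GPU reset, picking the path for the core's security mode. */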
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};

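/* Allocate a new MMUv2 domain covering the full 4GB GPU address space. */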
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}