// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

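/*
 * With these masks a 32-bit GPU virtual address splits into three
 * fields: bits 31:22 index the master TLB, bits 21:12 index the
 * selected slave TLB and bits 11:0 are the offset into the 4K page.
 * Illustrative example (values not taken from the code): iova
 * 0x12345000 selects MTLB entry 0x048 and STLB entry 0x345 at page
 * offset 0. Each of the 1024 MTLB entries therefore covers 4MB, for
 * a 4GB address space in total.
 */
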
struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;
	/* P(age) T(able) A(rray) */
	u64 *pta_cpu;
	dma_addr_t pta_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
			    int stlb)
{
	if (etnaviv_domain->stlb_cpu[stlb])
		return 0;

	etnaviv_domain->stlb_cpu[stlb] =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!etnaviv_domain->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
					 MMUv2_PTE_PRESENT;

	return 0;
}

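/*
 * Mappings are always exactly one 4K page. The PTE carries the low
 * 32 bits of the physical address; with a 64-bit phys_addr_t the
 * physical address bits 39:32 are stashed in PTE bits 11:4, which
 * are unused page-offset bits for a 4K-aligned page.
 */
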
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
	if (ret)
		return ret;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

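/*
 * Set up the static parts of the pagetable hierarchy: the scratch
 * (bad) page that the MMU's safe address is pointed at in the restore
 * paths below, the page table array used by the secure configuration,
 * and the master TLB. Slave TLBs are allocated lazily by
 * etnaviv_iommuv2_ensure_stlb() on the first mapping into their
 * 4MB region.
 */
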
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	int ret;

	/* allocate scratch page */
	etnaviv_domain->base.bad_page_cpu =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->base.bad_page_dma,
				     GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
					       SZ_4K, &etnaviv_domain->pta_dma,
					       GFP_KERNEL);
	if (!etnaviv_domain->pta_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
						SZ_4K, &etnaviv_domain->mtlb_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->base.bad_page_cpu,
			    etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->pta_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	return ret;
}

static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->base.bad_page_cpu,
		    etnaviv_domain->base.bad_page_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
				    etnaviv_domain->stlb_cpu[i],
				    etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}

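/*
 * The dump format is the MTLB page followed by one 4K page for each
 * STLB that is marked present in the MTLB, so the reported size
 * depends on how many second level tables have been populated.
 */
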
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	/*
	 * Only advance buf for present STLBs, so the amount written
	 * matches what etnaviv_iommuv2_dump_size() accounted for;
	 * advancing unconditionally would run past the buffer.
	 */
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}

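/*
 * Two restore paths, selected by gpu->sec_mode: the non-secure path
 * programs the MTLB and safe address directly via a command stream
 * built by etnaviv_buffer_config_mmuv2(), while the kernel-secure
 * path points the hardware at the page table array and loads it
 * through the FE before enabling the MMU via the SEC register set.
 */
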
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};

struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}