drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}
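
/*
 * Map a single 4K page: derive the MTLB/STLB indices from the iova and
 * store the physical address, together with the present and (optional)
 * writeable bits, in the STLB entry.
 */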
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;
	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}
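
/*
 * Unmap a single 4K page by replacing its STLB entry with the exception
 * marker, so accesses to the stale iova fault instead of reaching the
 * old physical page.
 */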
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}
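
/*
 * Set up the domain backing store: a poison-filled scratch page (used as
 * the MMU's safe address for faulting accesses), the single MTLB page and
 * all 1024 STLB pages up front, with every STLB entry initialised to the
 * exception marker.
 */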
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i, j;

	/* allocate scratch page */
	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
					etnaviv_domain->base.dev,
					SZ_4K,
					&etnaviv_domain->base.bad_page_dma,
					GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}
	p = etnaviv_domain->base.bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
						      SZ_4K,
						      &etnaviv_domain->mtlb_dma,
						      GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	/* pre-populate STLB pages (may want to switch to on-demand later) */
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		etnaviv_domain->stlb_cpu[i] =
				dma_alloc_coherent(etnaviv_domain->base.dev,
						   SZ_4K,
						   &etnaviv_domain->stlb_dma[i],
						   GFP_KERNEL);
		if (!etnaviv_domain->stlb_cpu[i]) {
			ret = -ENOMEM;
			goto fail_mem;
		}
		p = etnaviv_domain->stlb_cpu[i];
		for (j = 0; j < SZ_4K / 4; j++)
			*p++ = MMUv2_PTE_EXCEPTION;

		etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
					      MMUv2_PTE_PRESENT;
	}

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
				  etnaviv_domain->base.bad_page_cpu,
				  etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
				  etnaviv_domain->mtlb_cpu,
				  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	return ret;
}
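
/* Free everything etnaviv_iommuv2_init() allocated, then the domain itself. */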
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
			  etnaviv_domain->base.bad_page_cpu,
			  etnaviv_domain->base.bad_page_dma);

	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
			  etnaviv_domain->mtlb_cpu,
			  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}
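
/*
 * A pagetable dump consists of the 4K MTLB page plus one 4K page for
 * every STLB that is marked present in the MTLB.
 */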
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}
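
/* Copy the MTLB followed by all present STLB pages into the dump buffer. */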
static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
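
/*
 * Re-enable the MMU after a GPU reset: if it is not already enabled,
 * program the MTLB and scratch page addresses through a command stream
 * burst, wait for the FE to go idle and then set the enable bit.
 */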
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};
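
/*
 * Allocate a MMUv2 domain spanning the GPU's full 4G virtual address
 * space, starting at address 0.
 */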
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}