/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024
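
/*
 * Two-level pagetable layout: a 32-bit IOVA splits into a 10-bit M(aster) TLB
 * index (bits 31:22), a 10-bit S(lave) TLB index (bits 21:12) and a 12-bit
 * page offset, so each of the 1024 MTLB entries points to an STLB page of
 * 1024 PTEs covering 4K pages.
 */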
struct etnaviv_iommuv2_domain {
	struct iommu_domain domain;
	struct device *dev;
	void *bad_page_cpu;
	dma_addr_t bad_page_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[1024];
	dma_addr_t stlb_dma[1024];
};

static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, domain);
}
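
/*
 * Map a single 4K page: select the STLB via the MTLB index and store the
 * physical address with the permission bits as the PTE.
 */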
static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
	   phys_addr_t paddr, size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;
	u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (prot & IOMMU_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}
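
/*
 * Unmap a single 4K page by restoring the exception marker in the PTE, so
 * stale accesses fault instead of hitting freed memory.
 */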
static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}
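
/*
 * Walk the two-level table in software and return the page-aligned physical
 * address stored in the PTE.
 */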
static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
	dma_addr_t iova)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
}
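
/*
 * Allocate the scratch (bad) page, the MTLB and all 1024 STLB pages up front,
 * mark every PTE as an exception entry and point each MTLB entry at its STLB.
 */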
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i, j;

	/* allocate scratch page */
	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						  SZ_4K,
						  &etnaviv_domain->bad_page_dma,
						  GFP_KERNEL);
	if (!etnaviv_domain->bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}
	p = etnaviv_domain->bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						  SZ_4K,
						  &etnaviv_domain->mtlb_dma,
						  GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	/* pre-populate STLB pages (may want to switch to on-demand later) */
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		etnaviv_domain->stlb_cpu[i] =
				dma_alloc_coherent(etnaviv_domain->dev,
						   SZ_4K,
						   &etnaviv_domain->stlb_dma[i],
						   GFP_KERNEL);
		if (!etnaviv_domain->stlb_cpu[i]) {
			ret = -ENOMEM;
			goto fail_mem;
		}
		p = etnaviv_domain->stlb_cpu[i];
		for (j = 0; j < SZ_4K / 4; j++)
			*p++ = MMUv2_PTE_EXCEPTION;

		etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
					      MMUv2_PTE_PRESENT;
	}

	return 0;

fail_mem:
	if (etnaviv_domain->bad_page_cpu)
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				  etnaviv_domain->bad_page_cpu,
				  etnaviv_domain->bad_page_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				  etnaviv_domain->mtlb_cpu,
				  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	return ret;
}
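
/*
 * Tear down a domain: release the scratch page, the MTLB and every allocated
 * STLB page, then free the domain structure itself.
 */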
static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			  etnaviv_domain->bad_page_cpu,
			  etnaviv_domain->bad_page_dma);

	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			  etnaviv_domain->mtlb_cpu,
			  etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_coherent(etnaviv_domain->dev, SZ_4K,
					  etnaviv_domain->stlb_cpu[i],
					  etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}
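
/*
 * Size of a pagetable dump: one page for the MTLB plus one page per STLB that
 * is marked present.
 */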
static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}
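
/*
 * Copy the MTLB followed by every present STLB page into the dump buffer,
 * laid out as computed by etnaviv_iommuv2_dump_size().
 */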
static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}

static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
	.ops = {
		.domain_free = etnaviv_iommuv2_domain_free,
		.map = etnaviv_iommuv2_map,
		.unmap = etnaviv_iommuv2_unmap,
		.iova_to_phys = etnaviv_iommuv2_iova_to_phys,
		.pgsize_bitmap = SZ_4K,
	},
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};
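
/*
 * (Re)program the MMUv2 configuration through a command stream run by the
 * front end, then enable the MMU; if it is already enabled the programmed
 * state is still valid and nothing needs to be done.
 */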
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->bad_page_dma);
	etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
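
/*
 * Allocate an etnaviv MMUv2 domain spanning the full 32-bit, 4K-page GPU
 * address space and populate its pagetables.
 */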
struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	etnaviv_domain->dev = gpu->dev;

	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
	etnaviv_domain->domain.geometry.aperture_start = 0;
	etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->domain;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}