// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
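
/*
 * Unmap a page-aligned range from a GPU MMU domain, one 4K page at a
 * time. Misaligned requests are rejected outright, and a backend unmap
 * that stops making progress terminates the loop early.
 */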
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}
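
/*
 * Map a physically contiguous, page-aligned range into a GPU MMU domain
 * in 4K steps. If any page fails to map, the pages mapped so far are
 * unmapped again, so the caller never sees a partial mapping.
 */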
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
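
/*
 * Map a scatterlist into the GPU address space starting at @iova. Each
 * entry is mapped from the start of its page (sg_dma_address() minus the
 * intra-page offset); on error, all entries mapped so far are rolled
 * back.
 */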
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}
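
/*
 * Reverse of etnaviv_iommu_map(): walk the scatterlist and unmap each
 * entry. Per-entry sizes must be page aligned, which the BUG_ON enforces.
 */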
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}
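
/* Unmap a GEM object from the MMU and release its address space node. */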
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
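
/*
 * Find room for @size bytes in the GPU address space. If the initial
 * allocation fails with -ENOSPC, unpinned mappings are scanned with the
 * drm_mm eviction scan interface, the selected victims are unmapped, and
 * the allocation is retried in eviction mode.
 */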
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}
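
/*
 * Map a GEM object into the GPU address space. On v1 MMUs a contiguous
 * buffer that already sits inside the 2GiB linear window is used in
 * place without an MMU entry; everything else gets an iova from
 * etnaviv_iommu_find_iova() and a full page-table mapping.
 */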
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
unlock:
	mutex_unlock(&mmu->lock);

	return ret;
}
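
/* Drop a GEM mapping; the caller must have released all uses first. */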
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}
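
/* Tear down the address space manager and free the backend domain. */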
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	mmu->domain->ops->free(mmu->domain);
	kfree(mmu);
}
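
/*
 * Allocate an MMU context for @gpu, picking the v1 or v2 page-table
 * backend from the GPU's minor feature bits, and initialise a drm_mm
 * range manager over the domain's address window.
 */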
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

	return mmu;
}
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}
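
/*
 * Hand out a GPU address for a suballocated kernel buffer. A v1 MMU sees
 * the buffer through the linear window, so the physical address is just
 * rebased against memory_base; a v2 MMU needs a real, read-only mapping.
 */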
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
				  struct drm_mm_node *vram_node, size_t size,
				  u32 *iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		*iova = paddr - gpu->memory_base;
		return 0;
	} else {
		int ret;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return ret;
		}
		ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
					 size, ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(vram_node);
			mutex_unlock(&mmu->lock);
			return ret;
		}
		mutex_unlock(&mmu->lock);

		*iova = (u32)vram_node->start;
		return 0;
	}
}
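
/*
 * Release a suballocation address again. Only v2 created an actual
 * mapping, so only v2 has anything to undo.
 */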
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
				   struct drm_mm_node *vram_node, size_t size,
				   u32 iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2) {
		mutex_lock(&mmu->lock);
		etnaviv_domain_unmap(mmu->domain, iova, size);
		drm_mm_remove_node(vram_node);
		mutex_unlock(&mmu->lock);
	}
}
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	return iommu->domain->ops->dump_size(iommu->domain);
}
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	iommu->domain->ops->dump(iommu->domain, buf);
}