// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
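
/*
 * Walk a [iova, iova + size) range in SZ_4K steps and tear down each page
 * through the per-context MMU ops. The range must be page aligned; the loop
 * stops early if the backend fails to unmap a page.
 */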
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}
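
/*
 * Map a DMA-mapped scatterlist into GPU address space starting at iova.
 * Each sg entry is mapped as one contiguous chunk; on failure everything
 * mapped so far is unwound before returning the error.
 */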
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}
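
/*
 * Tear down the MMU mapping of a GEM object and give its address range back
 * to the drm_mm allocator. The caller must hold context->lock.
 */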
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
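
/*
 * Allocate a free range of GPU address space for @node. If the address space
 * is exhausted, unused (unpinned) mappings are scanned and evicted via the
 * drm_mm scan API and the allocation is retried in eviction mode.
 */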
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}
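
/* Reserve the exact [va, va + size) range, if it is still free. */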
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	lockdep_assert_held(&context->lock);

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}
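
/*
 * Map a GEM object into a context. On MMUv1 a contiguous buffer that fits
 * below the 2GB linear window is used directly without touching the page
 * tables; otherwise an IOVA range is allocated (exact if @va is non-zero)
 * and the object's scatterlist is mapped read/write.
 */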
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}
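
/* Remove a GEM mapping from the context. The mapping must no longer be in use. */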
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}
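
/*
 * Final kref release: unmap the cmdbuf suballocation from this context and
 * free the context through the backend ops.
 */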
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}
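
/*
 * Map the cmdbuf suballocation into a context, refcounted per context. MMUv1
 * only manufactures an iova relative to the linear window; otherwise an IOVA
 * range is allocated and the buffer is mapped through the page tables.
 */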
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}
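
/*
 * Drop a reference on the cmdbuf suballocation mapping and tear it down once
 * the last user is gone. MMUv1 mappings only alias the linear window, so
 * there is nothing to unmap in that case.
 */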
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}
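
/*
 * Set up the MMU state shared between all GPU cores: pick the MMU version
 * from the core's feature bits, allocate the poisoned scratch page (and the
 * PTA on MMUv2) and select the matching backend ops. If the global state
 * already exists it is only refcounted, but all cores must agree on the
 * MMU version.
 */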
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}
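
/* Drop one user of the global MMU state and free the DMA allocations on the last put. */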
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}