// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
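
/*
 * Low-level page table helpers: etnaviv_context_unmap() and
 * etnaviv_context_map() below walk an IOVA range in 4K steps and call into
 * the version-specific MMU backend (context->global->ops) for each page.
 * On a partial failure, etnaviv_context_map() unrolls whatever it has
 * already mapped.
 */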
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}
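
/*
 * etnaviv_iommu_map()/etnaviv_iommu_unmap() below translate a scatter/gather
 * table into per-entry calls to the page-level helpers above. Each entry is
 * mapped from its DMA address rounded down by the intra-page offset, with
 * that offset folded into the length, so the helpers always see page-aligned
 * ranges.
 */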
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* undo the partial mapping: unmap the entries mapped so far */
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);
		da += bytes;
	}
	return ret;
}
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
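
/*
 * etnaviv_iommu_find_iova() below allocates address space from the
 * per-context drm_mm. If the range is full it runs a drm_mm eviction scan
 * over the unpinned mappings, reaps the selected ones from the MMU and
 * retries the allocation in DRM_MM_INSERT_EVICT mode.
 */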
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}
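
/*
 * etnaviv_iommu_map_gem() below establishes the GPU-visible mapping of a GEM
 * object. On MMUv1 a contiguous (single-entry) scatterlist that fits below
 * the 2 GiB linear window boundary is used directly without touching the
 * page tables; otherwise an IOVA range is allocated (exact if a va was
 * requested) and the object is mapped read/write through the page tables.
 */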
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
			  struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
			  struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}
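
/*
 * The suballoc helpers below manage the cmdbuf suballocator mapping with a
 * simple use count. On MMUv1 the region is only reachable through the linear
 * window, so a synthetic iova (paddr - memory_base) is reported instead of a
 * page table entry; the matching put only drops the use count there.
 */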
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	mapping->use = 1;
	list_add_tail(&mapping->mmu_node, &context->mappings);

	mutex_unlock(&context->lock);

	return 0;
}
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use -= 1;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}
void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}
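
/*
 * etnaviv_iommu_global_init()/_fini() below manage the MMU state shared
 * between GPU cores: the MMU version (probed from minor_features1), the
 * backend ops, a DMA-coherent page filled with a 0xdead55aa poison pattern
 * and, on MMUv2, the PTA buffer. The state is refcounted via global->use,
 * as multiple cores may attach to it.
 */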
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}