// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */
6 #include <linux/bitops.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/platform_device.h>
9 #include <linux/sizes.h>
10 #include <linux/slab.h>
12 #include "etnaviv_gpu.h"
13 #include "etnaviv_mmu.h"
14 #include "state_hi.xml.h"
/*
 * MMUv1 uses a single flat page table of 32-bit entries covering the GPU
 * virtual range starting at GPU_MEM_START, one entry per 4K page.
 */
#define PT_SIZE		SZ_512K
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

#define GPU_MEM_START	0x80000000
21 struct etnaviv_iommuv1_context
{
22 struct etnaviv_iommu_context base
;
24 dma_addr_t pgtable_dma
;
27 static struct etnaviv_iommuv1_context
*
28 to_v1_context(struct etnaviv_iommu_context
*context
)
30 return container_of(context
, struct etnaviv_iommuv1_context
, base
);
33 static void etnaviv_iommuv1_free(struct etnaviv_iommu_context
*context
)
35 struct etnaviv_iommuv1_context
*v1_context
= to_v1_context(context
);
37 drm_mm_takedown(&context
->mm
);
39 dma_free_wc(context
->global
->dev
, PT_SIZE
, v1_context
->pgtable_cpu
,
40 v1_context
->pgtable_dma
);
42 context
->global
->v1
.shared_context
= NULL
;
47 static int etnaviv_iommuv1_map(struct etnaviv_iommu_context
*context
,
48 unsigned long iova
, phys_addr_t paddr
,
49 size_t size
, int prot
)
51 struct etnaviv_iommuv1_context
*v1_context
= to_v1_context(context
);
52 unsigned int index
= (iova
- GPU_MEM_START
) / SZ_4K
;
57 v1_context
->pgtable_cpu
[index
] = paddr
;
62 static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context
*context
,
63 unsigned long iova
, size_t size
)
65 struct etnaviv_iommuv1_context
*v1_context
= to_v1_context(context
);
66 unsigned int index
= (iova
- GPU_MEM_START
) / SZ_4K
;
71 v1_context
->pgtable_cpu
[index
] = context
->global
->bad_page_dma
;
76 static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context
*context
)
81 static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context
*context
,
84 struct etnaviv_iommuv1_context
*v1_context
= to_v1_context(context
);
86 memcpy(buf
, v1_context
->pgtable_cpu
, PT_SIZE
);
89 static void etnaviv_iommuv1_restore(struct etnaviv_gpu
*gpu
,
90 struct etnaviv_iommu_context
*context
)
92 struct etnaviv_iommuv1_context
*v1_context
= to_v1_context(context
);
95 /* set base addresses */
96 gpu_write(gpu
, VIVS_MC_MEMORY_BASE_ADDR_RA
, context
->global
->memory_base
);
97 gpu_write(gpu
, VIVS_MC_MEMORY_BASE_ADDR_FE
, context
->global
->memory_base
);
98 gpu_write(gpu
, VIVS_MC_MEMORY_BASE_ADDR_TX
, context
->global
->memory_base
);
99 gpu_write(gpu
, VIVS_MC_MEMORY_BASE_ADDR_PEZ
, context
->global
->memory_base
);
100 gpu_write(gpu
, VIVS_MC_MEMORY_BASE_ADDR_PE
, context
->global
->memory_base
);
102 /* set page table address in MC */
103 pgtable
= (u32
)v1_context
->pgtable_dma
;
105 gpu_write(gpu
, VIVS_MC_MMU_FE_PAGE_TABLE
, pgtable
);
106 gpu_write(gpu
, VIVS_MC_MMU_TX_PAGE_TABLE
, pgtable
);
107 gpu_write(gpu
, VIVS_MC_MMU_PE_PAGE_TABLE
, pgtable
);
108 gpu_write(gpu
, VIVS_MC_MMU_PEZ_PAGE_TABLE
, pgtable
);
109 gpu_write(gpu
, VIVS_MC_MMU_RA_PAGE_TABLE
, pgtable
);
113 const struct etnaviv_iommu_ops etnaviv_iommuv1_ops
= {
114 .free
= etnaviv_iommuv1_free
,
115 .map
= etnaviv_iommuv1_map
,
116 .unmap
= etnaviv_iommuv1_unmap
,
117 .dump_size
= etnaviv_iommuv1_dump_size
,
118 .dump
= etnaviv_iommuv1_dump
,
119 .restore
= etnaviv_iommuv1_restore
,
122 struct etnaviv_iommu_context
*
123 etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global
*global
)
125 struct etnaviv_iommuv1_context
*v1_context
;
126 struct etnaviv_iommu_context
*context
;
128 mutex_lock(&global
->lock
);
131 * MMUv1 does not support switching between different contexts without
132 * a stop the world operation, so we only support a single shared
133 * context with this version.
135 if (global
->v1
.shared_context
) {
136 context
= global
->v1
.shared_context
;
137 etnaviv_iommu_context_get(context
);
138 mutex_unlock(&global
->lock
);
142 v1_context
= kzalloc(sizeof(*v1_context
), GFP_KERNEL
);
144 mutex_unlock(&global
->lock
);
148 v1_context
->pgtable_cpu
= dma_alloc_wc(global
->dev
, PT_SIZE
,
149 &v1_context
->pgtable_dma
,
151 if (!v1_context
->pgtable_cpu
)
154 memset32(v1_context
->pgtable_cpu
, global
->bad_page_dma
, PT_ENTRIES
);
156 context
= &v1_context
->base
;
157 context
->global
= global
;
158 kref_init(&context
->refcount
);
159 mutex_init(&context
->lock
);
160 INIT_LIST_HEAD(&context
->mappings
);
161 drm_mm_init(&context
->mm
, GPU_MEM_START
, PT_ENTRIES
* SZ_4K
);
162 context
->global
->v1
.shared_context
= context
;
164 mutex_unlock(&global
->lock
);
169 mutex_unlock(&global
->lock
);