// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>

#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
        struct msm_mmu base;
        struct iommu_domain *domain;
        atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

struct msm_iommu_pagetable {
        struct msm_mmu base;

        struct msm_mmu *parent;
        struct io_pgtable_ops *pgtbl_ops;
        phys_addr_t ttbr;
        u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
        return container_of(mmu, struct msm_iommu_pagetable, base);
}

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
                size_t size)
{
        struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
        struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
        size_t unmapped = 0, remain = size;

        /* Unmap the block one page at a time */
        while (remain) {
                unmapped += ops->unmap(ops, iova, 4096, NULL);
                iova += 4096;
                remain -= 4096;
        }

        /* TLB maintenance goes through the parent domain, see null_tlb_ops */
        iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

        return (unmapped == size) ? 0 : -EINVAL;
}

static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
                struct sg_table *sgt, size_t len, int prot)
{
        struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
        struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
        struct scatterlist *sg;
        size_t mapped = 0;
        u64 addr = iova;
        unsigned int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t size = sg->length;
                phys_addr_t phys = sg_phys(sg);

                /* Map the block one page at a time */
                while (size) {
                        if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
                                /* Roll back everything mapped so far */
                                msm_iommu_pagetable_unmap(mmu, iova, mapped);
                                return -EINVAL;
                        }

                        phys += 4096;
                        addr += 4096;
                        size -= 4096;
                        mapped += 4096;
                }
        }

        return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
        struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
        struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
        struct adreno_smmu_priv *adreno_smmu =
                dev_get_drvdata(pagetable->parent->dev);

        /*
         * If this is the last attached pagetable for the parent,
         * disable TTBR0 in the arm-smmu driver
         */
        if (atomic_dec_return(&iommu->pagetables) == 0)
                adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

        free_io_pgtable_ops(pagetable->pgtbl_ops);
        kfree(pagetable);
}

int msm_iommu_pagetable_params(struct msm_mmu *mmu,
                phys_addr_t *ttbr, int *asid)
{
        struct msm_iommu_pagetable *pagetable;

        if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
                return -EINVAL;

        pagetable = to_pagetable(mmu);

        if (ttbr)
                *ttbr = pagetable->ttbr;

        if (asid)
                *asid = pagetable->asid;

        return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
        .map = msm_iommu_pagetable_map,
        .unmap = msm_iommu_pagetable_unmap,
        .destroy = msm_iommu_pagetable_destroy,
};
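
/*
 * The io-pgtable instances created for TTBR0 use these no-op TLB callbacks:
 * the per-process pagetable has no iommu_domain of its own, so invalidation
 * is done through the parent domain instead (see the iommu_flush_iotlb_all()
 * call in msm_iommu_pagetable_unmap() above).
 */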
static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
                size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
                unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
        .tlb_flush_all = msm_iommu_tlb_flush_all,
        .tlb_flush_walk = msm_iommu_tlb_flush_walk,
        .tlb_add_page = msm_iommu_tlb_add_page,
};
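
/*
 * Create a private pagetable on top of the parent MSM_MMU_IOMMU.  The
 * kernel-managed GPU mappings stay in the SMMU's TTBR1 pagetable, while each
 * pagetable created here supplies a TTBR0 configuration handed to the
 * arm-smmu driver through the adreno_smmu_priv get_ttbr1_cfg()/set_ttbr0_cfg()
 * hooks.
 */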
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
        struct msm_iommu *iommu = to_msm_iommu(parent);
        struct msm_iommu_pagetable *pagetable;
        const struct io_pgtable_cfg *ttbr1_cfg = NULL;
        struct io_pgtable_cfg ttbr0_cfg;
        int ret;

        /* Get the pagetable configuration from the domain */
        if (adreno_smmu->cookie)
                ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
        if (!ttbr1_cfg)
                return ERR_PTR(-ENODEV);

        pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
        if (!pagetable)
                return ERR_PTR(-ENOMEM);

        msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
                MSM_MMU_IOMMU_PAGETABLE);

        /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
        ttbr0_cfg = *ttbr1_cfg;

        /* The incoming cfg will have the TTBR1 quirk enabled */
        ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
        ttbr0_cfg.tlb = &null_tlb_ops;

        pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
                &ttbr0_cfg, iommu->domain);

        if (!pagetable->pgtbl_ops) {
                kfree(pagetable);
                return ERR_PTR(-ENOMEM);
        }

        /*
         * If this is the first pagetable that we've allocated, send it back to
         * the arm-smmu driver as a trigger to set up TTBR0
         */
        if (atomic_inc_return(&iommu->pagetables) == 1) {
                ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
                if (ret) {
                        free_io_pgtable_ops(pagetable->pgtbl_ops);
                        kfree(pagetable);
                        return ERR_PTR(ret);
                }
        }

        /* Needed later for TLB flush */
        pagetable->parent = parent;
        pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

        /*
         * TODO we would like each set of page tables to have a unique ASID
         * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
         * end up flushing the ASID used for TTBR1 pagetables, which is not
         * what we want.  So for now just use the same ASID as TTBR1.
         */
        pagetable->asid = 0;

        return &pagetable->base;
}

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
                unsigned long iova, int flags, void *arg)
{
        struct msm_iommu *iommu = arg;
        if (iommu->base.handler)
                return iommu->base.handler(iommu->base.arg, iova, flags);
        pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
        return 0;
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
        struct msm_iommu *iommu = to_msm_iommu(mmu);

        iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
                struct sg_table *sgt, size_t len, int prot)
{
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        size_t ret;

        /* The arm-smmu driver expects the addresses to be sign extended */
        if (iova & BIT_ULL(48))
                iova |= GENMASK_ULL(63, 49);

        ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
        WARN_ON(!ret);

        return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
        struct msm_iommu *iommu = to_msm_iommu(mmu);

        /* Sign extend the address, as in msm_iommu_map() */
        if (iova & BIT_ULL(48))
                iova |= GENMASK_ULL(63, 49);

        iommu_unmap(iommu->domain, iova, len);

        return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
        struct msm_iommu *iommu = to_msm_iommu(mmu);

        iommu_domain_free(iommu->domain);
        kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
        .detach = msm_iommu_detach,
        .map = msm_iommu_map,
        .unmap = msm_iommu_unmap,
        .destroy = msm_iommu_destroy,
};

struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
        struct msm_iommu *iommu;
        int ret;

        if (!domain)
                return ERR_PTR(-ENODEV);

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return ERR_PTR(-ENOMEM);

        iommu->domain = domain;
        msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
        iommu_set_fault_handler(domain, msm_fault_handler, iommu);

        atomic_set(&iommu->pagetables, 0);

        ret = iommu_attach_device(iommu->domain, dev);
        if (ret) {
                iommu_domain_free(domain);
                kfree(iommu);
                return ERR_PTR(ret);
        }

        return &iommu->base;
}
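
/*
 * Usage sketch (illustrative, not part of this file): the msm DRM driver
 * would typically allocate an IOMMU domain for the GPU device, wrap it with
 * msm_iommu_new(), and then layer per-process pagetables on top with
 * msm_iommu_pagetable_create().  Roughly:
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *      struct msm_mmu *mmu, *pt;
 *
 *      mmu = msm_iommu_new(dev, domain);
 *      if (IS_ERR(mmu)) {
 *              iommu_domain_free(domain);
 *              return PTR_ERR(mmu);
 *      }
 *
 *      pt = msm_iommu_pagetable_create(mmu);   (one per GPU context)
 */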