// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"

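/*
 * An arm_smmu_mmu_notifier tracks one {domain, mm} pair: it holds the shared
 * context descriptor and the mmu_notifier used to mirror CPU TLB invalidations
 * to the SMMU. Entries live on the domain's mmu_notifiers list.
 */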
struct arm_smmu_mmu_notifier {
        struct mmu_notifier             mn;
        struct arm_smmu_ctx_desc        *cd;
        bool                            cleared;
        refcount_t                      refs;
        struct list_head                list;
        struct arm_smmu_domain          *domain;
};

#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)

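/*
 * An arm_smmu_bond represents one {device, mm} binding. Bonds are refcounted,
 * kept on the master's bonds list, and reference the shared MMU notifier for
 * their {domain, mm} pair.
 */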
struct arm_smmu_bond {
        struct iommu_sva                sva;
        struct mm_struct                *mm;
        struct arm_smmu_mmu_notifier    *smmu_mn;
        struct list_head                list;
        refcount_t                      refs;
};

#define sva_to_bond(handle) \
        container_of(handle, struct arm_smmu_bond, sva)

static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
        int ret;
        u32 new_asid;
        struct arm_smmu_ctx_desc *cd;
        struct arm_smmu_device *smmu;
        struct arm_smmu_domain *smmu_domain;

        cd = xa_load(&arm_smmu_asid_xa, asid);
        if (!cd)
                return NULL;

        if (cd->mm) {
                if (WARN_ON(cd->mm != mm))
                        return ERR_PTR(-EINVAL);
                /* All devices bound to this mm use the same cd struct. */
                refcount_inc(&cd->refs);
                return cd;
        }

        smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
        smmu = smmu_domain->smmu;

        ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
                       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
        if (ret)
                return ERR_PTR(-ENOSPC);
        /*
         * Race with unmap: TLB invalidations will start targeting the new ASID,
         * which isn't assigned yet. We'll do an invalidate-all on the old ASID
         * later, so it doesn't matter.
         */
        cd->asid = new_asid;
        /*
         * Update ASID and invalidate CD in all associated masters. There will
         * be some overlap between use of both ASIDs, until we invalidate the
         * TLB.
         */
        arm_smmu_write_ctx_desc(smmu_domain, 0, cd);

        /* Invalidate TLB entries previously associated with that context */
        arm_smmu_tlb_inv_asid(smmu, asid);

        xa_erase(&arm_smmu_asid_xa, asid);
        return NULL;
}

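/*
 * Allocate a context descriptor that shares the CPU page tables of @mm: pin
 * the ASID with arm64_mm_context_get(), resolve ASID clashes through
 * arm_smmu_share_asid(), and mirror the CPU's TTBR/TCR/MAIR configuration
 * into the descriptor.
 */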
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
        u16 asid;
        int err = 0;
        u64 tcr, par, reg;
        struct arm_smmu_ctx_desc *cd;
        struct arm_smmu_ctx_desc *ret = NULL;

        asid = arm64_mm_context_get(mm);
        if (!asid)
                return ERR_PTR(-ESRCH);

        cd = kzalloc(sizeof(*cd), GFP_KERNEL);
        if (!cd) {
                err = -ENOMEM;
                goto out_put_context;
        }

        refcount_set(&cd->refs, 1);

        mutex_lock(&arm_smmu_asid_lock);
        ret = arm_smmu_share_asid(mm, asid);
        if (ret) {
                mutex_unlock(&arm_smmu_asid_lock);
                goto out_free_cd;
        }

        err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
        mutex_unlock(&arm_smmu_asid_lock);

        if (err)
                goto out_free_asid;

        tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
              FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
              FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
              FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
              CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

        switch (PAGE_SIZE) {
        case SZ_4K:
                tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
                break;
        case SZ_16K:
                tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
                break;
        case SZ_64K:
                tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
                break;
        default:
                WARN_ON(1);
                err = -EINVAL;
                goto out_free_asid;
        }

        reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
        tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

        cd->ttbr = virt_to_phys(mm->pgd);
        cd->tcr = tcr;
        /*
         * MAIR value is pretty much constant and global, so we can just get it
         * from the current CPU register
         */
        cd->mair = read_sysreg(mair_el1);
        cd->asid = asid;
        cd->mm = mm;

        return cd;

out_free_asid:
        arm_smmu_free_asid(cd);
out_free_cd:
        kfree(cd);
out_put_context:
        arm64_mm_context_put(mm);
        return err < 0 ? ERR_PTR(err) : ret;
}

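/*
 * Drop a reference to a shared context descriptor. The last reference also
 * unpins the mm's ASID and frees the descriptor.
 */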
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
        if (arm_smmu_free_asid(cd)) {
                /* Unpin ASID */
                arm64_mm_context_put(cd->mm);
                kfree(cd);
        }
}

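/*
 * mmu_notifier invalidate_range callback: mappings in [start, end) changed on
 * the CPU side. Only ATC invalidation is issued here; TLB maintenance is
 * expected to be broadcast by the CPU, since SVA requires ARM_SMMU_FEAT_BTM.
 */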
static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
{
        struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);

        arm_smmu_atc_inv_domain(smmu_mn->domain, mm->pasid, start,
                                end - start + 1);
}

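/*
 * mmu_notifier release callback: the mm is going away. Point the CD at the
 * global quiet_cd so translation is disabled without raising C_BAD_CD events,
 * then invalidate the TLB and ATC for this context.
 */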
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
        struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

        mutex_lock(&sva_lock);
        if (smmu_mn->cleared) {
                mutex_unlock(&sva_lock);
                return;
        }

        /*
         * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
         * but disable translation.
         */
        arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

        arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
        arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

        smmu_mn->cleared = true;
        mutex_unlock(&sva_lock);
}

static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
{
        kfree(mn_to_smmu(mn));
}

static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
        .invalidate_range       = arm_smmu_mm_invalidate_range,
        .release                = arm_smmu_mm_release,
        .free_notifier          = arm_smmu_mmu_notifier_free,
};

/* Allocate or get existing MMU notifier for this {domain, mm} pair */
static struct arm_smmu_mmu_notifier *
arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
                          struct mm_struct *mm)
{
        int ret;
        struct arm_smmu_ctx_desc *cd;
        struct arm_smmu_mmu_notifier *smmu_mn;

        list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
                if (smmu_mn->mn.mm == mm) {
                        refcount_inc(&smmu_mn->refs);
                        return smmu_mn;
                }
        }

        cd = arm_smmu_alloc_shared_cd(mm);
        if (IS_ERR(cd))
                return ERR_CAST(cd);

        smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
        if (!smmu_mn) {
                ret = -ENOMEM;
                goto err_free_cd;
        }

        refcount_set(&smmu_mn->refs, 1);
        smmu_mn->cd = cd;
        smmu_mn->domain = smmu_domain;
        smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

        ret = mmu_notifier_register(&smmu_mn->mn, mm);
        if (ret) {
                kfree(smmu_mn);
                goto err_free_cd;
        }

        ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
        if (ret)
                goto err_put_notifier;

        list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
        return smmu_mn;

err_put_notifier:
        /* Frees smmu_mn */
        mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
        arm_smmu_free_shared_cd(cd);
        return ERR_PTR(ret);
}

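/*
 * Drop a reference to a {domain, mm} notifier. On the last put, clear the
 * context descriptor and, unless release() already ran, invalidate the TLB
 * and ATC before freeing the notifier and the shared CD.
 */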
static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
        struct mm_struct *mm = smmu_mn->mn.mm;
        struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
        struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

        if (!refcount_dec_and_test(&smmu_mn->refs))
                return;

        list_del(&smmu_mn->list);
        arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

        /*
         * If we went through clear(), we've already invalidated, and no
         * new TLB entry can have been formed.
         */
        if (!smmu_mn->cleared) {
                arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
                arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
        }

        /* Frees smmu_mn */
        mmu_notifier_put(&smmu_mn->mn);
        arm_smmu_free_shared_cd(cd);
}

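/*
 * Bind @mm to @dev. The caller must hold sva_lock. An existing bond for this
 * {dev, mm} pair is reused; otherwise a PASID is allocated and an MMU
 * notifier is set up for the {domain, mm} pair.
 */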
static struct iommu_sva *
__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
{
        int ret;
        struct arm_smmu_bond *bond;
        struct arm_smmu_master *master = dev_iommu_priv_get(dev);
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (!master || !master->sva_enabled)
                return ERR_PTR(-ENODEV);

        /* If bind() was already called for this {dev, mm} pair, reuse it. */
        list_for_each_entry(bond, &master->bonds, list) {
                if (bond->mm == mm) {
                        refcount_inc(&bond->refs);
                        return &bond->sva;
                }
        }

        bond = kzalloc(sizeof(*bond), GFP_KERNEL);
        if (!bond)
                return ERR_PTR(-ENOMEM);

        /* Allocate a PASID for this mm if necessary */
        ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
        if (ret)
                goto err_free_bond;

        bond->mm = mm;
        bond->sva.dev = dev;
        refcount_set(&bond->refs, 1);

        bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
        if (IS_ERR(bond->smmu_mn)) {
                ret = PTR_ERR(bond->smmu_mn);
                goto err_free_pasid;
        }

        list_add(&bond->list, &master->bonds);
        return &bond->sva;

err_free_pasid:
        iommu_sva_free_pasid(mm);
err_free_bond:
        kfree(bond);
        return ERR_PTR(ret);
}

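/*
 * Entry point for the SVA bind operation. Callers normally reach this through
 * the generic IOMMU SVA API rather than directly; a minimal sketch of a
 * hypothetical consumer (not part of this file):
 *
 *      struct iommu_sva *handle;
 *
 *      handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *      if (!IS_ERR(handle))
 *              pasid = iommu_sva_get_pasid(handle);
 *      ...
 *      iommu_sva_unbind_device(handle);
 */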
struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
        struct iommu_sva *handle;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
                return ERR_PTR(-EINVAL);

        mutex_lock(&sva_lock);
        handle = __arm_smmu_sva_bind(dev, mm);
        mutex_unlock(&sva_lock);
        return handle;
}

void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
        struct arm_smmu_bond *bond = sva_to_bond(handle);

        mutex_lock(&sva_lock);
        if (refcount_dec_and_test(&bond->refs)) {
                list_del(&bond->list);
                arm_smmu_mmu_notifier_put(bond->smmu_mn);
                iommu_sva_free_pasid(bond->mm);
                kfree(bond);
        }
        mutex_unlock(&sva_lock);
}

u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
        struct arm_smmu_bond *bond = sva_to_bond(handle);

        return bond->mm->pasid;
}

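/*
 * Report whether the SMMU can share CPU page tables at all: it needs broadcast
 * TLB maintenance and coherency (plus VAX for 52-bit VAs), a page size and an
 * output address size compatible with the CPU's, and at least as many ASID
 * bits.
 */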
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
        unsigned long reg, fld;
        unsigned long oas;
        unsigned long asid_bits;
        u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;

        if (vabits_actual == 52)
                feat_mask |= ARM_SMMU_FEAT_VAX;

        if ((smmu->features & feat_mask) != feat_mask)
                return false;

        if (!(smmu->pgsize_bitmap & PAGE_SIZE))
                return false;

        /*
         * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
         * not even pretending to support AArch32 here. Abort if the MMU outputs
         * addresses larger than what we support.
         */
        reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
        oas = id_aa64mmfr0_parange_to_phys_shift(fld);
        if (smmu->oas < oas)
                return false;

        /* We can support bigger ASIDs than the CPU, but not smaller */
        fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
        asid_bits = fld ? 16 : 8;
        if (smmu->asid_bits < asid_bits)
                return false;

        /*
         * See max_pinned_asids in arch/arm64/mm/context.c. The following is
         * generally the maximum number of bindable processes.
         */
        if (arm64_kernel_unmapped_at_el0())
                asid_bits--;
        dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
                num_possible_cpus() - 2);

        return true;
}

static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
{
        return false;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
        if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
                return false;

        /* SSID and IOPF support are mandatory for the moment */
        return master->ssid_bits && arm_smmu_iopf_supported(master);
}

bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
        bool enabled;

        mutex_lock(&sva_lock);
        enabled = master->sva_enabled;
        mutex_unlock(&sva_lock);
        return enabled;
}

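/*
 * Enabling and disabling SVA on a master only toggles sva_enabled under
 * sva_lock; disabling is refused while the master still has bonds.
 */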
int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
        mutex_lock(&sva_lock);
        master->sva_enabled = true;
        mutex_unlock(&sva_lock);

        return 0;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
        mutex_lock(&sva_lock);
        if (!list_empty(&master->bonds)) {
                dev_err(master->dev, "cannot disable SVA, device is bound\n");
                mutex_unlock(&sva_lock);
                return -EBUSY;
        }
        master->sva_enabled = false;
        mutex_unlock(&sva_lock);

        return 0;
}

void arm_smmu_sva_notifier_synchronize(void)
{
        /*
         * Some MMU notifiers may still be waiting to be freed, using
         * arm_smmu_mmu_notifier_free(). Wait for them.
         */
        mmu_notifier_synchronize();
}