// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000
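/*
 * The MSI_IOVA window above is a driver convention rather than a hardware
 * limit: arm_smmu_get_resv_regions() below exposes exactly this base/length
 * as an IOMMU_RESV_SW_MSI reserved region so that MSI doorbells for devices
 * behind the SMMU can be mapped at a predictable IOVA.
 */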
static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
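/*
 * Note on for_each_cfg_sme(): the comma expression assigns idx before the
 * i < fw->num_ids test runs, and fwspec_smendx() bounds-checks i itself, so
 * even the terminating evaluation is safe and simply yields INVALID_SMENDX.
 * Illustrative use, mirroring the callers further down:
 *
 *	for_each_cfg_sme(fwspec, i, idx)
 *		arm_smmu_write_sme(smmu, idx);
 */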
static bool using_legacy_binding, using_generic_binding;

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int reg, idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}
static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int reg, idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}
static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, false, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, true, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
				  unsigned long iova, size_t granule,
				  void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, granule, granule, true, cookie);
}
static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s1,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s2,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_vmid,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}
static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}
/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
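/*
 * Worked example for the matching logic above (illustrative values only):
 * with an existing valid SMR of id=0x400, mask=0x7f (covering stream IDs
 * 0x400-0x47f), a new request of id=0x410, mask=0x0f is entirely contained
 * (its mask bits are a subset and the IDs agree outside the existing mask),
 * so that index is simply reused. A request of id=0x440, mask=0xf0 is not
 * contained, yet the IDs agree everywhere outside the combined mask, so some
 * stream ID (e.g. 0x440) would match both entries and -EINVAL is returned.
 */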
static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}
static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	ret = arm_smmu_domain_add_master(smmu_domain, fwspec);

rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	arm_smmu_rpm_get(smmu);
	ret = ops->map(ops, iova, paddr, size, prot);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;

	if (!ops)
		return 0;

	arm_smmu_rpm_get(smmu);
	ret = ops->unmap(ops, iova, size, gather);
	arm_smmu_rpm_put(smmu);

	return ret;
}
static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}

static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
				struct iommu_iotlb_gather *gather)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu_domain->flush_ops) {
		arm_smmu_rpm_get(smmu);
		smmu_domain->flush_ops->tlb_sync(smmu_domain);
		arm_smmu_rpm_put(smmu);
	}
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *reg;
	u32 tmp;
	u64 phys;
	unsigned long va, flags;
	int ret, idx = cfg->cbndx;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return 0;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	va = iova & ~0xfffUL;
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
	else
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);

	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) {
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	arm_smmu_rpm_put(smmu);

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (!ops)
		return 0;

	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
		return arm_smmu_iova_to_phys_hard(domain, iova);

	return ops->iova_to_phys(ops, iova);
}
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
							  fwnode);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);

		/*
		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
		 * will allocate/initialise a new one. Thus we need to update fwspec for
		 * later use.
		 */
		fwspec = dev_iommu_fwspec_get(dev);
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		goto out_cfg_free;

	ret = arm_smmu_master_alloc_smes(dev);
	arm_smmu_rpm_put(smmu);

	if (ret)
		goto out_cfg_free;

	iommu_device_link(&smmu->iommu, dev);

	device_link_add(dev, smmu->dev,
			DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);

	return 0;

out_cfg_free:
	kfree(cfg);
out_free:
	iommu_fwspec_free(dev);
	return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg;
	struct arm_smmu_device *smmu;
	int ret;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	cfg  = fwspec->iommu_priv;
	smmu = cfg->smmu;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	iommu_device_unlink(&smmu->iommu, dev);
	arm_smmu_master_free_smes(fwspec);

	arm_smmu_rpm_put(smmu);

	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else if (dev_is_fsl_mc(dev))
		group = fsl_mc_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch(domain->type) {
	case IOMMU_DOMAIN_UNMANAGED:
		switch (attr) {
		case DOMAIN_ATTR_NESTING:
			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
			return 0;
		default:
			return -ENODEV;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			*(int *)data = smmu_domain->non_strict;
			return 0;
		default:
			return -ENODEV;
		}
		break;
	default:
		return -EINVAL;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch(domain->type) {
	case IOMMU_DOMAIN_UNMANAGED:
		switch (attr) {
		case DOMAIN_ATTR_NESTING:
			if (smmu_domain->smmu) {
				ret = -EPERM;
				goto out_unlock;
			}

			if (*(int *)data)
				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
			else
				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			smmu_domain->non_strict = *(int *)data;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	default:
		ret = -EINVAL;
	}
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 mask, fwid = 0;

	if (args->args_count > 0)
		fwid |= FIELD_PREP(SMR_ID, args->args[0]);

	if (args->args_count > 1)
		fwid |= FIELD_PREP(SMR_MASK, args->args[1]);
	else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
		fwid |= FIELD_PREP(SMR_MASK, mask);

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}
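/*
 * For reference (illustrative DT snippet, not taken from any particular
 * platform): a generic-binding master described as
 *
 *	iommus = <&smmu 0x400 0x7f>;
 *
 * is translated by arm_smmu_of_xlate() above into a single fwspec ID with the
 * stream ID packed via SMR_ID and the optional mask via SMR_MASK, which is
 * how arm_smmu_add_device()/arm_smmu_master_alloc_smes() later unpack it with
 * FIELD_GET().
 */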
static void arm_smmu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);

	iommu_dma_get_resv_regions(dev, head);
}

static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
*smmu
)
1581 /* clear global FSR */
1582 reg
= arm_smmu_gr0_read(smmu
, ARM_SMMU_GR0_sGFSR
);
1583 arm_smmu_gr0_write(smmu
, ARM_SMMU_GR0_sGFSR
, reg
);
1586 * Reset stream mapping groups: Initial values mark all SMRn as
1587 * invalid and all S2CRn as bypass unless overridden.
1589 for (i
= 0; i
< smmu
->num_mapping_groups
; ++i
)
1590 arm_smmu_write_sme(smmu
, i
);
1592 /* Make sure all context banks are disabled and clear CB_FSR */
1593 for (i
= 0; i
< smmu
->num_context_banks
; ++i
) {
1594 arm_smmu_write_context_bank(smmu
, i
);
1595 arm_smmu_cb_write(smmu
, i
, ARM_SMMU_CB_FSR
, FSR_FAULT
);
1598 /* Invalidate the TLB, just in case */
1599 arm_smmu_gr0_write(smmu
, ARM_SMMU_GR0_TLBIALLH
, QCOM_DUMMY_VAL
);
1600 arm_smmu_gr0_write(smmu
, ARM_SMMU_GR0_TLBIALLNSNH
, QCOM_DUMMY_VAL
);
1602 reg
= arm_smmu_gr0_read(smmu
, ARM_SMMU_GR0_sCR0
);
1604 /* Enable fault reporting */
1605 reg
|= (sCR0_GFRE
| sCR0_GFIE
| sCR0_GCFGFRE
| sCR0_GCFGFIE
);
1607 /* Disable TLB broadcasting. */
1608 reg
|= (sCR0_VMIDPNE
| sCR0_PTM
);
1610 /* Enable client access, handling unmatched streams as appropriate */
1611 reg
&= ~sCR0_CLIENTPD
;
1615 reg
&= ~sCR0_USFCFG
;
1617 /* Disable forced broadcasting */
1620 /* Don't upgrade barriers */
1623 if (smmu
->features
& ARM_SMMU_FEAT_VMID16
)
1624 reg
|= sCR0_VMID16EN
;
1626 if (smmu
->features
& ARM_SMMU_FEAT_EXIDS
)
1627 reg
|= sCR0_EXIDENABLE
;
1629 if (smmu
->impl
&& smmu
->impl
->reset
)
1630 smmu
->impl
->reset(smmu
);
1632 /* Push the button */
1633 arm_smmu_tlb_sync_global(smmu
);
1634 arm_smmu_gr0_write(smmu
, ARM_SMMU_GR0_sCR0
, reg
);
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int size;
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << FIELD_GET(ID0_NUMSIDB, id);
	}
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = FIELD_GET(ID0_NUMSMRG, id);
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups", size);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);
	spin_lock_init(&smmu->global_sync_lock);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (FIELD_GET(ID1_NUMPAGENDXB, id) + 1);
	if (smmu->numpage != 2 * size << smmu->pgshift)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%x) differs from mapped region size (0x%x)!\n",
			2 * size << smmu->pgshift, smmu->numpage);
	/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
	smmu->numpage = size;

	smmu->num_s2_context_banks = FIELD_GET(ID1_NUMS2CB, id);
	smmu->num_context_banks = FIELD_GET(ID1_NUMCB, id);
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
				 sizeof(*smmu->cbs), GFP_KERNEL);
	if (!smmu->cbs)
		return -ENOMEM;

	/* ID2 */
	id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_IAS, id));
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits(FIELD_GET(ID2_OAS, id));
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = FIELD_GET(ID2_UBS, id);
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	if (smmu->impl && smmu->impl->cfg_probe)
		return smmu->impl->cfg_probe(smmu);

	return 0;
}
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static const struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
	{ },
};
#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU401:
		smmu->version = ARM_SMMU_V1_64K;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
		smmu->version = ARM_SMMU_V2;
		smmu->model = CAVIUM_SMMUV2;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
static void arm_smmu_bus_init(void)
{
	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
#ifdef CONFIG_FSL_MC_BUS
	if (!iommu_present(&fsl_mc_bus_type))
		bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
#endif
}
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);
	if (err)
		return err;

	smmu = arm_smmu_impl_init(smmu);
	if (IS_ERR(smmu))
		return PTR_ERR(smmu);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ioaddr = res->start;
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	/*
	 * The resource size should effectively match the value of SMMU_TOP;
	 * stash that temporarily until we know PAGESIZE to validate it with.
	 */
	smmu->numpage = resource_size(res);

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = devm_clk_bulk_get_all(dev, &smmu->clks);
	if (err < 0) {
		dev_err(dev, "failed to get clocks %d\n", err);
		return err;
	}
	smmu->num_clks = err;

	err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
	if (err)
		return err;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2) {
		if (smmu->num_context_banks > smmu->num_context_irqs) {
			dev_err(dev,
			      "found only %d context irq(s) but %d required\n",
			      smmu->num_context_irqs, smmu->num_context_banks);
			return -ENODEV;
		}

		/* Ignore superfluous interrupts */
		smmu->num_context_irqs = smmu->num_context_banks;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
				     "smmu.%pa", &ioaddr);
	if (err) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return err;
	}

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		dev_err(dev, "Failed to register iommu\n");
		return err;
	}

	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/*
	 * We want to avoid touching dev->power.lock in fastpaths unless
	 * it's really going to do something useful - pm_runtime_enabled()
	 * can serve as an ideal proxy for that decision. So, conditionally
	 * enable pm_runtime.
	 */
	if (dev->pm_domain) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	/*
	 * For ACPI and generic DT bindings, an SMMU will be probed before
	 * any device which might need it, so we want the bus ops in place
	 * ready to handle default domain setup as soon as any SMMU exists.
	 */
	if (!using_legacy_binding)
		arm_smmu_bus_init();

	return 0;
}
/*
 * With the legacy DT binding in play, though, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
static int arm_smmu_legacy_bus_init(void)
{
	if (using_legacy_binding)
		arm_smmu_bus_init();
	return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);
static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	arm_smmu_rpm_get(smmu);
	/* Turn the thing off */
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, sCR0_CLIENTPD);
	arm_smmu_rpm_put(smmu);

	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_force_suspend(smmu->dev);
	else
		clk_bulk_disable(smmu->num_clks, smmu->clks);

	clk_bulk_unprepare(smmu->num_clks, smmu->clks);
}
static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	arm_smmu_device_reset(smmu);

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}

static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_resume(dev);
}

static int __maybe_unused arm_smmu_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return arm_smmu_runtime_suspend(dev);
}
static const struct dev_pm_ops arm_smmu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_pm_suspend, arm_smmu_pm_resume)
	SET_RUNTIME_PM_OPS(arm_smmu_runtime_suspend,
			   arm_smmu_runtime_resume, NULL)
};

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu",
		.of_match_table		= of_match_ptr(arm_smmu_of_match),
		.pm			= &arm_smmu_pm_ops,
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.shutdown = arm_smmu_device_shutdown,
};
builtin_platform_driver(arm_smmu_driver);