// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"

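/*
 * Calxeda "smmu-secure-config-access" quirk: on these integrations the
 * driver's configuration accesses land on the Secure register bank, so
 * it has to program the Non-secure aliases explicitly instead. Per the
 * SMMUv2 architecture, the NS aliases of the banked GR0 registers sit
 * 0x400 above their Secure counterparts, hence the remap below.
 */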
static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care for sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};

struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base; /* Specific to Cavium */
};

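/*
 * Erratum #27704 means the CN88xx SMMU instances effectively share one
 * ASID/VMID namespace, so each instance must allocate context IDs that
 * are globally unique. Each probed SMMU reserves a contiguous block of
 * IDs from the shared counter and offsets all its allocations by that
 * id_base.
 */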
static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}

static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
			       struct io_pgtable_cfg *pgtbl_cfg,
			       struct device *dev)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};

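/*
 * devm_krealloc() enlarges the core arm_smmu_device allocation in place
 * (preserving both its contents and its devres lifetime), so the
 * Cavium-specific state simply wraps the device it describes. Callers
 * continue through the embedded smmu member.
 */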
static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;

	cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu.impl = &cavium_impl;

	return &cs->smmu;
}

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

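/*
 * Deliberately not static: integrations built around MMU-500 reuse this
 * reset for their own impls (see mrvl_mmu500_impl below).
 */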
int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};

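/*
 * The AP806 interconnect cannot deliver 64-bit accesses to the SMMU
 * registers (erratum #582743), so every readq/writeq is emulated as a
 * pair of 32-bit accesses via the hi_lo helpers; combined with hiding
 * the AArch64 pagetable formats below, no register ever needs an atomic
 * 64-bit update.
 */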
static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split all the readq to double readl
	 */
	return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}

static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
			       u64 val)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split all the writeq to double writel
	 */
	hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}

static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
	/*
	 * Armada-AP806 erratum #582743.
	 * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
	 * formats altogether and allow using 32 bits access on the
	 * interconnect.
	 */
	smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
			    ARM_SMMU_FEAT_FMT_AARCH64_16K |
			    ARM_SMMU_FEAT_FMT_AARCH64_64K);

	return 0;
}

static const struct arm_smmu_impl mrvl_mmu500_impl = {
	.read_reg64 = mrvl_mmu500_readq,
	.write_reg64 = mrvl_mmu500_writeq,
	.cfg_probe = mrvl_mmu500_cfg_probe,
	.reset = arm_mmu500_reset,
};

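/*
 * Entry point from the core driver's probe path: pick the impl ops for
 * any model- or platform-specific quirks before the SMMU is probed,
 * reset and configured. May return a new (wrapped) device pointer, as
 * the Cavium, NVIDIA and Qualcomm paths do.
 */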
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Set the impl for model-specific implementation quirks first,
	 * such that platform integration quirks can pick it up and
	 * inherit from it if necessary.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	/* This is implicitly MMU-400 */
	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
		return nvidia_smmu_impl_init(smmu);

	smmu = qcom_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
		smmu->impl = &mrvl_mmu500_impl;

	return smmu;
}