// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/of_device.h>
#include <linux/qcom_scm.h>

#include "arm-smmu.h"

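/*
 * Qualcomm-specific wrapper around the generic arm_smmu_device. The extra
 * fields track the firmware S2CR quirk detected in qcom_smmu_cfg_probe():
 * bypass_quirk is set when BYPASS/FAULT S2CR writes are mangled by firmware,
 * and bypass_cbndx names the context bank reserved to emulate bypass streams.
 */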
struct qcom_smmu {
	struct arm_smmu_device smmu;
	bool bypass_quirk;
	u8 bypass_cbndx;
};

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
		u32 reg)
{
	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

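/*
 * Part of the private interface handed to the GPU driver: return the
 * io_pgtable_cfg of the domain's current (TTBR1) pagetable so the GPU
 * driver can create matching per-instance TTBR0 pagetables.
 */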
static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);

	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are used.
 */
static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}

static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
					       struct arm_smmu_device *smmu,
					       struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/* Initialize the private interface with the GPU */
	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;

	return 0;
}

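/* Client devices which should be attached with identity domains by default */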
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ }
};

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 reg;
	u32 smr;
	int i;

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	arm_mmu500_reset(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
		return qcom_sdm845_smmu500_reset(smmu);

	return 0;
}

static const struct arm_smmu_impl qcom_smmu_impl = {
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
};

static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
};

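/*
 * Grow the driver-allocated arm_smmu_device into a qcom_smmu with
 * devm_krealloc() so the chosen impl can carry Qualcomm-specific state.
 */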
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct arm_smmu_impl *impl)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;

	return &qsmmu->smmu;
}

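/* SoCs which need the Qualcomm SMMU-500 specific implementation */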
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,sc7180-smmu-500" },
	{ .compatible = "qcom,sdm845-smmu-500" },
	{ .compatible = "qcom,sm8150-smmu-500" },
	{ .compatible = "qcom,sm8250-smmu-500" },
	{ }
};

struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (of_match_node(qcom_smmu_impl_of_match, np))
		return qcom_smmu_create(smmu, &qcom_smmu_impl);

	if (of_device_is_compatible(np, "qcom,adreno-smmu"))
		return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);

	return smmu;
}