// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/adreno-smmu-priv.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "arm-smmu.h"
#include "arm-smmu-qcom.h"

#define QCOM_DUMMY_VAL	-1

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

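/*
 * Poll for TLB sync completion: kick the sync register, then spin with
 * exponentially increasing backoff delays until GSACTIVE clears. If the
 * sync never completes, hand off to qcom_smmu_tlb_sync_debug() for
 * implementation-specific diagnostics.
 */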
static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}

	qcom_smmu_tlb_sync_debug(smmu);
}

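/*
 * SCTLR override for the Adreno context banks: always set HUPCF so the
 * GPU keeps processing transactions after a fault, and set CFCFG to
 * stall (rather than terminate) faulting transactions on context banks
 * for which the GPU driver has requested stall-on-fault.
 */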
static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
		u32 reg)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);

	/*
	 * On the GPU device we want to process subsequent transactions after a
	 * fault to keep the GPU from hanging
	 */
	reg |= ARM_SMMU_SCTLR_HUPCF;

	if (qsmmu->stall_enabled & BIT(idx))
		reg |= ARM_SMMU_SCTLR_CFCFG;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

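/*
 * Snapshot the fault state of the GPU context bank (fault status,
 * syndromes, faulting address, TTBR0 and CONTEXTIDR) so the GPU driver
 * can decode and report the fault from its own fault handler.
 */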
static void qcom_adreno_smmu_get_fault_info(const void *cookie,
		struct adreno_smmu_fault_info *info)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	info->fsr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSR);
	info->fsynr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR0);
	info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
	info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
	info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
	info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
	info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}

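/*
 * Track per-context-bank stall-on-fault in a bitmap; the choice takes
 * effect the next time qcom_adreno_smmu_write_sctlr() programs SCTLR
 * for that context bank.
 */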
static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu);

	if (enabled)
		qsmmu->stall_enabled |= BIT(cfg->cbndx);
	else
		qsmmu->stall_enabled &= ~BIT(cfg->cbndx);
}

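/*
 * Resume a stalled transaction on this context bank, either retrying it
 * or terminating it depending on the GPU driver's decision.
 */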
static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	u32 reg = 0;

	if (terminate)
		reg |= ARM_SMMU_RESUME_TERMINATE;

	arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg);
}

#define QCOM_ADRENO_SMMU_GPU_SID 0

static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	/*
	 * The GPU will always use SID 0 so that is a handy way to uniquely
	 * identify it and configure it for per-instance pagetables
	 */
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);

		if (sid == QCOM_ADRENO_SMMU_GPU_SID)
			return true;
	}

	return false;
}

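/*
 * Hand the GPU driver a read-only view of the TTBR1 pagetable
 * configuration so it can set up matching per-instance TTBR0
 * pagetables.
 */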
static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg(
		const void *cookie)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable =
		io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	return &pgtable->cfg;
}

/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are enabled
 */
static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/* The domain must have split pagetables already enabled */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}

static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
					       struct arm_smmu_device *smmu,
					       struct device *dev, int start)
{
	int count;

	/*
	 * Assign context bank 0 to the GPU device so the GPU hardware can
	 * switch pagetables
	 */
	if (qcom_adreno_smmu_is_gpu_device(dev)) {
		start = 0;
		count = 1;
	} else {
		start = 1;
		count = smmu->num_context_banks;
	}

	return __arm_smmu_alloc_bitmap(smmu->context_map, start, count);
}

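/*
 * msm8996 is the one Adreno SMMU v2 that cannot take the TTBR1 quirk,
 * so it is excluded from the split-pagetable setup below.
 */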
static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	if (of_device_is_compatible(np, "qcom,msm8996-smmu-v2"))
		return false;

	return true;
}

static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct adreno_smmu_priv *priv;

	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	/* Only enable split pagetables for the GPU device (SID 0) */
	if (!qcom_adreno_smmu_is_gpu_device(dev))
		return 0;

	/*
	 * All targets that use the qcom,adreno-smmu compatible string *should*
	 * be AARCH64 stage 1 but double check because the arm-smmu code assumes
	 * that is the case when the TTBR1 quirk is enabled
	 */
	if (qcom_adreno_can_do_ttbr1(smmu_domain->smmu) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) &&
	    (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64))
		pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

	/*
	 * Initialize private interface with GPU:
	 */
	priv = dev_get_drvdata(dev);
	priv->cookie = smmu_domain;
	priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg;
	priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg;
	priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
	priv->set_stall = qcom_adreno_smmu_set_stall;
	priv->resume_translation = qcom_adreno_smmu_resume_translation;

	return 0;
}

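/*
 * Rough sketch of how the GPU side consumes the private interface set
 * up above (simplified from the drm/msm Adreno code; the flow is
 * illustrative, not a verbatim copy):
 *
 *	struct adreno_smmu_priv *priv = dev_get_drvdata(&pdev->dev);
 *	const struct io_pgtable_cfg *ttbr1_cfg;
 *	struct io_pgtable_cfg ttbr0_cfg;
 *
 *	ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);
 *	ttbr0_cfg = *ttbr1_cfg;		// clone the TTBR1 config
 *	// ...allocate a per-instance pagetable into ttbr0_cfg...
 *	priv->set_ttbr0_cfg(priv->cookie, &ttbr0_cfg);
 *
 * with set_ttbr0_cfg(cookie, NULL) tearing TTBR0 back down.
 */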
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-gmu" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,qcm2290-mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sc7280-mdss" },
	{ .compatible = "qcom,sc7280-mss-pil" },
	{ .compatible = "qcom,sc8180x-mdss" },
	{ .compatible = "qcom,sc8280xp-mdss" },
	{ .compatible = "qcom,sdm670-mdss" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ .compatible = "qcom,sm6350-mdss" },
	{ .compatible = "qcom,sm6375-mdss" },
	{ .compatible = "qcom,sm8150-mdss" },
	{ .compatible = "qcom,sm8250-mdss" },
	{ .compatible = "qcom,x1e80100-mdss" },
	{ }
};

static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

	return 0;
}

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	unsigned int last_s2cr;
	u32 reg;
	u32 smr;
	int i;

	/*
	 * MSM8998 LPASS SMMU reports 13 context banks, but accessing
	 * the last context bank crashes the system.
	 */
	if (of_device_is_compatible(smmu->dev->of_node, "qcom,msm8998-smmu-v2") &&
	    smmu->num_context_banks == 13) {
		smmu->num_context_banks = 12;
	} else if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2")) {
		if (smmu->num_context_banks == 21) /* SDM630 / SDM660 A2NOC SMMU */
			smmu->num_context_banks = 7;
		else if (smmu->num_context_banks == 14) /* SDM630 / SDM660 LPASS SMMU */
			smmu->num_context_banks = 13;
	}

	/*
	 * Some platforms support more than the Arm SMMU architected maximum of
	 * 128 stream matching groups. For unknown reasons, the additional
	 * groups don't exhibit the same behavior as the architected registers,
	 * so limit the groups to 128 until the behavior is fixed for the other
	 * groups.
	 */
	if (smmu->num_mapping_groups > 128) {
		dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
		smmu->num_mapping_groups = 128;
	}

	last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

static int qcom_adreno_smmuv2_cfg_probe(struct arm_smmu_device *smmu)
{
	/* Support for 16K pages is advertised on some SoCs, but it doesn't seem to work */
	smmu->features &= ~ARM_SMMU_FEAT_FMT_AARCH64_16K;

	/* TZ protects several last context banks, hide them from Linux */
	if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm630-smmu-v2") &&
	    smmu->num_context_banks == 5)
		smmu->num_context_banks = 2;

	return 0;
}

static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it to write FAULT by asking for a
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

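/*
 * Devices on the client list above default to an identity domain, as
 * they may already have live, firmware-established streams at boot.
 */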
static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	arm_mmu500_reset(smmu);

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

static const struct arm_smmu_impl qcom_smmu_v2_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
	.context_fault = qcom_smmu_context_fault,
	.context_fault_needs_threaded_irq = true,
#endif
};

static const struct arm_smmu_impl sdm845_smmu_500_impl = {
	.init_context = qcom_smmu_init_context,
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_sdm845_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
	.tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
	.context_fault = qcom_smmu_context_fault,
	.context_fault_needs_threaded_irq = true,
#endif
};

static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.cfg_probe = qcom_adreno_smmuv2_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
	.init_context = qcom_adreno_smmu_init_context,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = arm_mmu500_reset,
	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
	.write_sctlr = qcom_adreno_smmu_write_sctlr,
	.tlb_sync = qcom_smmu_tlb_sync,
};

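/*
 * Wrap the probed arm_smmu_device in a qcom_smmu: devm_krealloc() grows
 * the existing allocation so struct arm_smmu_device remains the first
 * member of struct qcom_smmu, and the caller continues with the
 * returned &qsmmu->smmu in place of the original pointer.
 */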
static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
		const struct qcom_smmu_match_data *data)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct arm_smmu_impl *impl;
	struct qcom_smmu *qsmmu;

	if (!data)
		return ERR_PTR(-EINVAL);

	if (np && of_device_is_compatible(np, "qcom,adreno-smmu"))
		impl = data->adreno_impl;
	else
		impl = data->impl;

	if (!impl)
		return smmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(dev_err_probe(smmu->dev, -EPROBE_DEFER,
					     "qcom_scm not ready\n"));

	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

	qsmmu->smmu.impl = impl;
	qsmmu->cfg = data->cfg;

	return &qsmmu->smmu;
}

/* Implementation Defined Register Space 0 register offsets */
static const u32 qcom_smmu_impl0_reg_offset[] = {
	[QCOM_SMMU_TBU_PWR_STATUS]		= 0x2204,
	[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK]	= 0x25dc,
	[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR]	= 0x2670,
};

static const struct qcom_smmu_config qcom_smmu_impl0_cfg = {
	.reg_offset = qcom_smmu_impl0_reg_offset,
};

/*
 * It is not yet possible to use MDP SMMU with the bypass quirk on the msm8996,
 * there are not enough context banks.
 */
static const struct qcom_smmu_match_data msm8996_smmu_data = {
	.impl = NULL,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data qcom_smmu_v2_data = {
	.impl = &qcom_smmu_v2_impl,
	.adreno_impl = &qcom_adreno_smmu_v2_impl,
};

static const struct qcom_smmu_match_data sdm845_smmu_500_data = {
	.impl = &sdm845_smmu_500_impl,
	/*
	 * No need for adreno impl here. On sdm845 the Adreno SMMU is handled
	 * by the separate sdm845-smmu-v2 device.
	 */
	/* Also no debug configuration. */
};

static const struct qcom_smmu_match_data qcom_smmu_500_impl0_data = {
	.impl = &qcom_smmu_500_impl,
	.adreno_impl = &qcom_adreno_smmu_500_impl,
	.cfg = &qcom_smmu_impl0_cfg,
};

/*
 * Do not add any more qcom,SOC-smmu-500 entries to this list, unless they need
 * special handling and can not be covered by the qcom,smmu-500 entry.
 */
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
	{ .compatible = "qcom,msm8996-smmu-v2", .data = &msm8996_smmu_data },
	{ .compatible = "qcom,msm8998-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sdm630-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sdm845-smmu-500", .data = &sdm845_smmu_500_data },
	{ .compatible = "qcom,sm6115-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm7150-smmu-v2", .data = &qcom_smmu_v2_data },
	{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,sm8450-smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ .compatible = "qcom,smmu-500", .data = &qcom_smmu_500_impl0_data },
	{ }
};

#ifdef CONFIG_ACPI
static struct acpi_platform_list qcom_acpi_platlist[] = {
	{ "LENOVO", "CB-01   ", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ "QCOM  ", "QCOMEDK2", 0x8180, ACPI_SIG_IORT, equal, "QCOM SMMU" },
	{ }
};
#endif

static int qcom_smmu_tbu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) {
		ret = qcom_tbu_probe(pdev);
		if (ret)
			return ret;
	}

	if (dev->pm_domain) {
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return 0;
}

static const struct of_device_id qcom_smmu_tbu_of_match[] = {
	{ .compatible = "qcom,sc7280-tbu" },
	{ .compatible = "qcom,sdm845-tbu" },
	{ }
};

static struct platform_driver qcom_smmu_tbu_driver = {
	.driver = {
		.name		= "qcom_tbu",
		.of_match_table	= qcom_smmu_tbu_of_match,
	},
	.probe = qcom_smmu_tbu_probe,
};

struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;
	const struct of_device_id *match;
	static u8 tbu_registered;

	if (!tbu_registered++)
		platform_driver_register(&qcom_smmu_tbu_driver);

#ifdef CONFIG_ACPI
	if (np == NULL) {
		/* Match platform for ACPI boot */
		if (acpi_match_platform_list(qcom_acpi_platlist) >= 0)
			return qcom_smmu_create(smmu, &qcom_smmu_500_impl0_data);
	}
#endif

	match = of_match_node(qcom_smmu_impl_of_match, np);
	if (match)
		return qcom_smmu_create(smmu, match->data);

	/*
	 * If you hit this WARN_ON() you are missing an entry in the
	 * qcom_smmu_impl_of_match[] table, and GPU per-process page-
	 * tables will be broken.
	 */
	WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
	     "Missing qcom_smmu_impl_of_match entry for: %s",
	     dev_name(smmu->dev));

	return smmu;
}