// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"
#define SMMU_INTR_SEL_NS	0x2000

enum qcom_iommu_clk {
	CLK_IFACE,
	CLK_BUS,
	CLK_TBU,
	CLK_NUM,
};

struct qcom_iommu_ctx;
struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk_bulk_data	 clks[CLK_NUM];
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 max_asid;
	struct qcom_iommu_ctx	*ctxs[];   /* indexed by asid */
};
struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	bool			 secured_ctx;
	u8			 asid;      /* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};
struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex; /* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
	struct iommu_fwspec	*fwspec;
};
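
/*
 * One qcom_iommu_dev represents the whole secure IOMMU block and owns an
 * array of qcom_iommu_ctx, one per context bank, indexed by ASID.  Each
 * qcom_iommu_domain wraps the generic iommu_domain together with the
 * io-pgtable ops backing it, and records which qcom_iommu_dev it was
 * finalized against.
 */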
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}
static const struct iommu_ops qcom_iommu_ops;
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = d->iommu;
	if (!qcom_iommu)
		return NULL;
	return qcom_iommu->ctxs[asid];
}
static void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}
static void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}
static u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}
static u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}
static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct qcom_iommu_domain *qcom_domain = cookie;
	struct iommu_fwspec *fwspec = qcom_domain->fwspec;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}
static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}
static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}
static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};
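
/*
 * qcom_flush_ops is handed to io-pgtable through io_pgtable_cfg.tlb in
 * qcom_iommu_init_domain(), so page-table updates made by map/unmap end
 * up calling back into the invalidation helpers above.
 */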
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_CB_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}
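
/*
 * Context banks are programmed with ARM_SMMU_SCTLR_CFCFG (stall on
 * fault), so after optionally reporting the fault the handler acks it by
 * writing FSR back and terminates the stalled transaction via
 * RESUME_TERMINATE.
 */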
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	qcom_domain->fwspec = fwspec;

	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */
		if (ctx->secured_ctx) {
			ctx->domain = domain;
			continue;
		}

		/* Disable context bank before programming */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		/* Clear context bank fault address fault status registers */
		iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
		iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
			     FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}
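
/*
 * Note that domain finalization is deferred to attach time:
 * qcom_iommu_attach_dev() calls qcom_iommu_init_domain() while holding a
 * pm_runtime reference, and the init_mutex plus the qcom_domain->iommu
 * check above make repeated attaches against the same IOMMU a no-op.
 */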
static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}
static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link. Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu)
		return -EINVAL;

	return 0;
}
static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
				      struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct qcom_iommu_domain *qcom_domain;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
	unsigned int i;

	if (domain == identity_domain || !domain)
		return 0;

	qcom_domain = to_qcom_iommu_domain(domain);
	if (WARN_ON(!qcom_domain->iommu))
		return -EINVAL;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);

	return 0;
}
static struct iommu_domain_ops qcom_iommu_identity_ops = {
	.attach_dev = qcom_iommu_identity_attach,
};

static struct iommu_domain qcom_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &qcom_iommu_identity_ops,
};
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t pgsize, size_t pgcount,
			  int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t pgsize, size_t pgcount,
			       struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf. So we
	 * cannot rely on the device_link. Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}
static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}
static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}
static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}
static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
	struct device_link *link;

	if (!qcom_iommu)
		return ERR_PTR(-ENODEV);

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return ERR_PTR(-ENODEV);
	}

	return &qcom_iommu->iommu;
}
static int qcom_iommu_of_xlate(struct device *dev,
			       const struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s "
			"(found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere:
	 */
	if (WARN_ON(asid > qcom_iommu->max_asid) ||
	    WARN_ON(qcom_iommu->ctxs[asid] == NULL)) {
		put_device(&iommu_pdev->dev);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		dev_iommu_priv_set(dev, qcom_iommu);
	} else {
		/* make sure devices iommus dt node isn't referring to
		 * multiple different iommu devices. Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
			put_device(&iommu_pdev->dev);
			return -EINVAL;
		}
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}
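
/*
 * Illustrative (made-up values, not taken from any real DT) shape of the
 * binding this xlate expects: the IOMMU node uses #iommu-cells = <1>,
 * context banks are child nodes, and each master references an ASID
 * directly:
 *
 *	apps_iommu: iommu@1e20000 {
 *		compatible = "qcom,msm-iommu-v1";
 *		qcom,iommu-secure-id = <17>;
 *		...
 *		iommu-ctx@4000 {
 *			compatible = "qcom,msm-iommu-v1-ns";
 *			reg = <0x4000 0x1000>;
 *			...
 *		};
 *	};
 *
 *	some-master {
 *		iommus = <&apps_iommu 4>;
 *	};
 */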
static const struct iommu_ops qcom_iommu_ops = {
	.identity_domain = &qcom_iommu_identity_domain,
	.capable	= qcom_iommu_capable,
	.domain_alloc_paging = qcom_iommu_domain_alloc_paging,
	.probe_device	= qcom_iommu_probe_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= qcom_iommu_attach_dev,
		.map_pages	= qcom_iommu_map,
		.unmap_pages	= qcom_iommu_unmap,
		.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
		.iotlb_sync	= qcom_iommu_iotlb_sync,
		.iova_to_phys	= qcom_iommu_iova_to_phys,
		.free		= qcom_iommu_domain_free,
	}
};
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}
static int get_asid(const struct device_node *np)
{
	u32 reg, val;
	int asid;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	/*
	 * Context banks are 0x1000 apart but, in some cases, the ASID
	 * number doesn't match to this logic and needs to be passed
	 * from the DT configuration explicitly.
	 */
	if (!of_property_read_u32(np, "qcom,ctx-asid", &val))
		asid = val;
	else
		asid = reg / 0x1000;

	return asid;
}
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	ctx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec"))
		ctx->secured_ctx = true;

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	if (!ctx->secured_ctx)
		iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid] = ctx;

	return 0;
}
static void qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid] = NULL;
}
static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ .compatible = "qcom,msm-iommu-v2-ns" },
	{ .compatible = "qcom,msm-iommu-v2-sec" },
	{ /* sentinel */ }
};
static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= ctx_of_match,
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};
static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child) {
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") ||
		    of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) {
			of_node_put(child);
			return true;
		}
	}

	return false;
}
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct clk *clk;
	int ret, max_asid = 0;

	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;

	qcom_iommu->max_asid = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	clk = devm_clk_get(dev, "iface");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_IFACE].clk = clk;

	clk = devm_clk_get(dev, "bus");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_BUS].clk = clk;

	clk = devm_clk_get_optional(dev, "tbu");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get tbu clock\n");
		return PTR_ERR(clk);
	}
	qcom_iommu->clks[CLK_TBU].clk = clk;

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		goto err_pm_disable;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		goto err_pm_disable;
	}

	ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		goto err_pm_disable;
	}

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;

err_pm_disable:
	pm_runtime_disable(dev);
	return ret;
}
static void qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);
}
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
	if (ret < 0)
		return ret;

	if (dev->pm_domain)
		return qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, 0);

	return ret;
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);

	return 0;
}
static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
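
/*
 * Runtime PM gates the iface/bus/tbu clocks, and masters keep the IOMMU
 * powered through the DL_FLAG_PM_RUNTIME device link created in
 * qcom_iommu_probe_device().  On resume the secure configuration is
 * restored via SCM when the device sits in a power domain.
 */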
static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ .compatible = "qcom,msm-iommu-v2" },
	{ /* sentinel */ }
};
static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= qcom_iommu_of_match,
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};
static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);