// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS			0x2000

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk		*iface_clk;
	struct clk		*bus_clk;
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 num_ctxs;
	struct qcom_iommu_ctx	*ctxs[];   /* indexed by asid-1 */
};

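/*
 * Note: ctxs[] above is a flexible array indexed by (asid - 1); it is sized
 * for the largest asid found among the child context-bank nodes and is
 * allocated with struct_size() in qcom_iommu_device_probe() below.
 */
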
struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;      /* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};

struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex; /* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;

	return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);

	if (!qcom_iommu)
		return NULL;

	return qcom_iommu->ctxs[asid - 1];
}

static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}

static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec;
	struct device *dev = cookie;
	unsigned i;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct device *dev = cookie;
	struct iommu_fwspec *fwspec;
	unsigned i;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct device *dev = cookie;
	struct iommu_fwspec *fwspec;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk	= qcom_iommu_tlb_flush_walk,
	.tlb_flush_leaf	= qcom_iommu_tlb_flush_leaf,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};

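/*
 * Note: the io-pgtable cookie used by qcom_flush_ops is the master's
 * struct device *, as passed to alloc_io_pgtable_ops() in
 * qcom_iommu_init_domain(); each callback above walks that device's
 * fwspec ids to find the context banks to invalidate.
 */
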
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}

static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
			     FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&qcom_domain->domain)) {
		kfree(qcom_domain);
		return NULL;
	}

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	iommu_put_dma_cookie(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link.  Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}

static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	unsigned i;

	if (WARN_ON(!qcom_domain->iommu))
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
}

static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/* Return true here as the SMMU can always send out coherent requests. */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct device_link *link;

	if (!qcom_iommu)
		return ERR_PTR(-ENODEV);

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return ERR_PTR(-ENODEV);
	}

	return &qcom_iommu->iommu;
}

static void qcom_iommu_release_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);

	if (!qcom_iommu)
		return;

	iommu_fwspec_free(dev);
}

static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s "
			"(found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs))
		return -EINVAL;

	if (!dev_iommu_priv_get(dev)) {
		dev_iommu_priv_set(dev, qcom_iommu);
	} else {
		/* make sure devices iommus dt node isn't referring to
		 * multiple different iommu devices.  Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
			return -EINVAL;
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

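/*
 * Illustrative only (node names are hypothetical, not taken from this file):
 * a master referencing context bank asid 1 of this IOMMU uses a single-cell
 * specifier in its device tree node, e.g.
 *
 *	iommus = <&apps_iommu 1>;
 *
 * The cell value arrives here as args->args[0] and is range-checked against
 * num_ctxs above before being added to the device's fwspec ids.
 */
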
static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.probe_device	= qcom_iommu_probe_device,
	.release_device	= qcom_iommu_release_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

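/*
 * The pgsize_bitmap above only seeds the io_pgtable_cfg in
 * qcom_iommu_init_domain(); once the ARM 32-bit LPAE stage-1 table has been
 * allocated, domain->pgsize_bitmap is overwritten with whatever page sizes
 * the page-table format actually supports.
 */
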
static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}

static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

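/*
 * On success the table stays allocated for the lifetime of the system: it
 * has been handed to the secure environment via
 * qcom_scm_iommu_secure_ptbl_init(), and the static 'allocated' flag keeps
 * this initialisation from running more than once.
 */
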
static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}

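/*
 * For example (illustrative reg values): a context bank node at
 * reg = <0x1000 ...> maps to asid 1 and reg = <0x2000 ...> to asid 2,
 * matching the 1:1 asid/context-bank relationship noted in
 * struct qcom_iommu_ctx.
 */
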
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child)
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
			return true;

	return false;
}

static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, max_asid = 0;

	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(qcom_iommu->iface_clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(qcom_iommu->iface_clk);
	}

	qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(qcom_iommu->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(qcom_iommu->bus_clk);
	}

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		return ret;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
	iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

	ret = iommu_device_register(&qcom_iommu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}

static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);