/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "io-pgtable.h"
#include "arm-smmu-regs.h"

#define SMMU_INTR_SEL_NS     0x2000

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk		*iface_clk;
	struct clk		*bus_clk;
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 num_ctxs;
	struct qcom_iommu_ctx	*ctxs[0];   /* indexed by asid-1 */
};

struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;      /* asid and ctx bank # are 1:1 */
};

struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex; /* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec)
{
	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;
	return fwspec->iommu_priv;
}

static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	if (!qcom_iommu)
		return NULL;
	return qcom_iommu->ctxs[asid - 1];
}

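/*
 * Thin MMIO accessors for a single context bank: register offsets are
 * relative to ctx->base, and the _relaxed variants imply no ordering
 * beyond the access itself.
 */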
static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}

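/*
 * TLB maintenance: invalidation is issued to every context bank named in
 * the fwspec, then qcom_iommu_tlb_sync() polls TLBSTATUS (for up to five
 * seconds) until the sync completes, warning on timeout.
 */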
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct iommu_fwspec *fwspec = cookie;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
		size_t s = size;

		iova &= ~12UL;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static const struct iommu_gather_ops qcom_gather_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_add_flush	= qcom_iommu_tlb_inv_range_nosync,
	.tlb_sync	= qcom_iommu_tlb_sync,
};

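/*
 * Context fault handler: read FSR/FSYNR0/FAR for the faulting context
 * bank, log the unhandled fault (rate limited), and write FSR back to
 * clear it before returning IRQ_HANDLED.
 */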
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	dev_err_ratelimited(ctx->dev,
			    "Unhandled context fault: fsr=0x%x, "
			    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, ctx->asid);

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);

	return IRQ_HANDLED;
}

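/*
 * Finalize a domain against one qcom_iommu instance: allocate an ARM
 * 32-bit LPAE stage-1 page table and program TTBR0/1, TTBCR/TTBCR2, the
 * MAIRs and SCTLR of every context bank listed in the fwspec.  Called
 * from attach_dev with the IOMMU runtime-resumed.
 */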
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct iommu_fwspec *fwspec)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_gather_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
				pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
				((u64)ctx->asid << TTBRn_ASID_SHIFT));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
				pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
				((u64)ctx->asid << TTBRn_ASID_SHIFT));

		/* TTBCR */
		iommu_writel(ctx, ARM_SMMU_CB_TTBCR2,
				(pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
				TTBCR2_SEP_UPSTREAM);
		iommu_writel(ctx, ARM_SMMU_CB_TTBCR,
				pgtbl_cfg.arm_lpae_s1_cfg.tcr);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
				pgtbl_cfg.arm_lpae_s1_cfg.mair[0]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
				pgtbl_cfg.arm_lpae_s1_cfg.mair[1]);

		/* SCTLR */
		reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
			SCTLR_M | SCTLR_S1_ASIDPNE;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&qcom_domain->domain)) {
		kfree(qcom_domain);
		return NULL;
	}

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	if (WARN_ON(qcom_domain->iommu))    /* forgot to detach? */
		return;

	iommu_put_dma_cookie(domain);

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);

	free_io_pgtable_ops(qcom_domain->pgtbl_ops);

	pm_runtime_put_sync(qcom_domain->iommu->dev);

	kfree(qcom_domain);
}

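/*
 * Attach finalizes the domain (page tables plus context bank setup) the
 * first time a master is attached; detach writes SCTLR = 0 to disable
 * each context bank and drops the domain's reference to the IOMMU.
 */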
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev->iommu_fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev->iommu_fwspec);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	unsigned i;

	if (!qcom_domain->iommu)
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
	}
	pm_runtime_put_sync(qcom_iommu->dev);

	qcom_domain->iommu = NULL;
}

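/*
 * map/unmap/iova_to_phys simply forward to the io-pgtable ops under
 * pgtbl_lock; unmap additionally keeps the IOMMU runtime-resumed because
 * the TLB invalidation path touches context bank registers.
 */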
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev->iommu_fwspec);
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR_OR_NULL(group))
		return PTR_ERR_OR_ZERO(group);

	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}

static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev->iommu_fwspec);

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

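/*
 * of_xlate: each master references this IOMMU with a single cell that
 * carries the context bank asid, for example (illustrative snippet, the
 * label name is made up):
 *
 *	iommus = <&gfx_iommu 1>, <&gfx_iommu 2>;
 *
 * args->args[0] below is that asid; it is range checked here so that
 * 'asid - 1' can index qcom_iommu->ctxs without further checks.
 */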
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s "
			"(found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs))
		return -EINVAL;

	if (!dev->iommu_fwspec->iommu_priv) {
		dev->iommu_fwspec->iommu_priv = qcom_iommu;
	} else {
		/* make sure devices iommus dt node isn't referring to
		 * multiple different iommu devices.  Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev->iommu_fwspec->iommu_priv))
			return -EINVAL;
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

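/*
 * The "iface" and "bus" clocks are only needed while the IOMMU is in
 * use; the runtime PM callbacks further down simply gate them via these
 * two helpers.
 */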
static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}

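/*
 * For secure context banks the secure world owns the page table memory:
 * ask SCM for the required size, allocate it without a kernel mapping,
 * and hand it over via qcom_scm_iommu_secure_ptbl_init().  This only has
 * to happen once, hence the static 'allocated' flag.
 */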
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;      /* context banks are 0x1000 apart */
}

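/*
 * Context bank probe: the child node's "reg" offset determines its asid
 * (a bank at offset 0x3000, for example, maps to asid 3 and slot
 * ctxs[2]); the bank's FSR is cleared and its fault IRQ requested before
 * the ctx is published in the parent's ctxs[] table.
 */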
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return -ENODEV;
	}

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove	= qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child)
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
			return true;

	return false;
}

static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, sz, max_asid = 0;

	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0]));

	qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);

	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(qcom_iommu->iface_clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(qcom_iommu->iface_clk);
	}

	qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(qcom_iommu->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(qcom_iommu->bus_clk);
	}

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		return ret;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
	iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

	ret = iommu_device_register(&qcom_iommu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}

static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qcom_iommu_of_match);

static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}

static void __exit qcom_iommu_exit(void)
{
	platform_driver_unregister(&qcom_iommu_driver);
	platform_driver_unregister(&qcom_iommu_ctx_driver);
}

module_init(qcom_iommu_init);
module_exit(qcom_iommu_exit);

IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1", NULL);

MODULE_DESCRIPTION("IOMMU API for QCOM IOMMU v1 implementations");
MODULE_LICENSE("GPL v2");