// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
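
/*
 * These sizes line up with the ARMv7 short-descriptor formats handled by
 * the io-pgtable ARM_V7S backend: 4K small pages, 64K large pages, 1M
 * sections and 16M supersections.
 */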
static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;
struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}
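
/*
 * Clock handling for register access: pclk is the interface clock and is
 * enabled first; the core clock is enabled on top of it, and a failure
 * there rolls pclk back, so the enable/disable helpers below always stay
 * balanced.
 */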
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		return ret;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}

	return ret;
}
static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_ESRRESTORE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}
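
/*
 * TLB maintenance: a domain may span several IOMMU instances, so every
 * flush walks priv->list_attached and invalidates each attached context
 * bank, with clocks enabled around the register writes.
 */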
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			return;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
}
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int temp_size;
	int ret;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			return;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}
}
static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}
static void __flush_iotlb_leaf(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, true, cookie);
}
static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule,
			       void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}
static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_flush_leaf = __flush_iotlb_leaf,
	.tlb_add_page = __flush_iotlb_page,
};
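
/*
 * Context banks are tracked in a bitmap; test_and_set_bit() makes the
 * claim atomic, so a bank found free cannot be handed out twice even if
 * two allocations race between the search and the set.
 */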
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}
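
/*
 * __program_context() points a context bank at the page table built by
 * io-pgtable: TTBCR/TTBR0/TTBR1 and the PRRR/NMRR memory-attribute remap
 * registers all come straight from the ARM_V7S cfg filled in by
 * msm_iommu_domain_config().
 */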
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
	SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}
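
/*
 * Only unmanaged domains are supported; the geometry is fixed to the full
 * 32-bit IOVA space addressable by the v7s page-table format.
 */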
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;
}
static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
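
/*
 * The config below enables IO_PGTABLE_QUIRK_TLBI_ON_MAP, so the io-pgtable
 * core performs TLB maintenance when mappings are created as well as when
 * they are torn down; after allocation, the page-size bitmap advertised by
 * msm_iommu_ops is narrowed to what the allocated table actually supports.
 */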
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}
/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}
static int msm_iommu_add_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	struct iommu_group *group;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return -ENODEV;

	iommu_device_link(&iommu->iommu, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}
static void msm_iommu_remove_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}
static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}
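
/*
 * iova_to_phys is answered by the hardware itself: the context TLB is
 * invalidated, the VA is written to the V2P request register, and the
 * physical address is read back from PAR; a supersection result keeps 24
 * VA bits, a normal result keeps 12.
 */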
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
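
/*
 * Stream IDs (MIDs) arrive one at a time through of_xlate; the first one
 * seen for a device allocates its master descriptor, later ones are
 * appended, and duplicates are warned about and dropped.
 */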
static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev->archdata.iommu = master;
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
				 sid);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}
static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		if (iommu->dev->of_node == spec->np)
			break;

	if (!iommu || iommu->dev->of_node != spec->np) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}
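
/*
 * The fault handler runs as a threaded IRQ: it scans every context bank's
 * FSR, dumps the interesting registers for any bank that faulted, and
 * writes the fault bits back to FSR to clear and retire the fault.
 */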
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}
static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the TLB sync operation is implicitly
	 * taken care of when the IOMMU client does a writel before
	 * kick-starting the other master.
	 */
	.iova_to_phys = msm_iommu_iova_to_phys,
	.add_device = msm_iommu_add_device,
	.remove_device = msm_iommu_remove_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	/* Sanity-check translation: a V2P probe must produce a valid PAR */
	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;

fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}
static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};
static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}
static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};
static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);