// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   "   #processor "," #op1 ", %0,"  #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	*iop;
	struct device		*dev;
	spinlock_t		pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_ESRRESTORE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

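/*
 * TLB maintenance helpers.
 *
 * These are invoked by the io-pgtable layer through msm_iommu_flush_ops
 * below: they walk every IOMMU instance attached to the domain, enable its
 * clocks, invalidate the TLB of each context bank owned by the domain
 * (either wholesale or one granule at a time), and disable the clocks again.
 */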
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

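/*
 * The domain uses the ARMv7 short-descriptor (ARM_V7S) io-pgtable format,
 * which is why the aperture set up in msm_iommu_domain_alloc_paging() is
 * limited to 32 bits and why the supported page sizes are the short-
 * descriptor ones (4K/64K/1M/16M) advertised in MSM_IOMMU_PGSIZES.
 */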
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
				     struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct msm_priv *priv;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	if (domain == identity_domain || !domain)
		return 0;

	priv = to_msm_priv(domain);
	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

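/*
 * There is no separate detach_dev callback: moving a device to the identity
 * domain (msm_iommu_identity_attach() above) is what tears down the paging
 * domain, freeing its io-pgtable and resetting/releasing the context banks
 * that had been programmed for it.
 */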
static struct iommu_domain_ops msm_iommu_identity_ops = {
	.attach_dev = msm_iommu_identity_attach,
};

static struct iommu_domain msm_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &msm_iommu_identity_ops,
};

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
				   GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			      size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);

	return 0;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	size_t ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (list_empty(&master->list))
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

static int insert_iommu_master(struct device *dev,
			       struct msm_iommu_dev **iommu,
			       const struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return -ENOMEM;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
			return 0;
		}

	master->mids[master->num_mids++] = spec->args[0];

	return 0;
}

static int qcom_iommu_of_xlate(struct device *dev,
			       const struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}
	}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	ret = insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.identity_domain = &msm_iommu_identity_domain,
	.domain_alloc_paging = msm_iommu_domain_alloc_paging,
	.probe_device = msm_iommu_probe_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= msm_iommu_attach_dev,
		.map_pages	= msm_iommu_map,
		.unmap_pages	= msm_iommu_unmap,
		/*
		 * Nothing is needed here, the barrier to guarantee
		 * completion of the tlb sync operation is implicitly
		 * taken care when the iommu client does a writel before
		 * kick starting the other master.
		 */
		.iotlb_sync	= NULL,
		.iotlb_sync_map	= msm_iommu_sync_map,
		.iova_to_phys	= msm_iommu_iova_to_phys,
		.free		= msm_iommu_domain_free,
	}
};

static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk))
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
				     "could not get smmu_pclk\n");

	ret = clk_prepare(iommu->pclk);
	if (ret)
		return dev_err_probe(iommu->dev, ret,
				     "could not prepare smmu_pclk\n");

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
				     "could not get iommu_clk\n");
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	platform_set_drvdata(pdev, iommu);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static void msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name	= "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe		= msm_iommu_probe,
	.remove		= msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);