// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

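/* Read a 32-bit value from the named CP15 coprocessor register into "reg" */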
#define MRC(reg, processor, op1, crn, crm, op2)			\
__asm__ __volatile__ (						\
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

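/*
 * Enable the interface clock (pclk) and, when present, the core clock.
 * If the core clock fails to enable, pclk is disabled again so the
 * enable counts stay balanced.
 */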
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

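/* Bring the global registers and all ncb context banks to a clean, disabled state */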
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

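/* Invalidate the whole TLB of every context bank attached to this domain */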
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

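/*
 * Invalidate the range one granule at a time on every attached context
 * bank, tagging each TLBIVA address with the bank's current ASID.
 */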
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

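/* Atomically claim a free context bank number from the allocation bitmap */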
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

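/*
 * Route each of the master's stream IDs (MIDs) to its context bank and
 * program the bank's VMID, ASID and security attributes.
 */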
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

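/*
 * Fully initialize one context bank: enable hardware table walks, point
 * TTBR0 at the domain's ARMv7 short-descriptor table, program memory
 * attributes, invalidate the TLB, wire up fault reporting and finally
 * turn the MMU on.
 */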
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

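/* Allocate an ARMv7 short-descriptor io-pgtable for the domain */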
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

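/*
 * Attach the domain to every IOMMU instance serving this device:
 * allocate a context bank per master, program it with the domain's
 * page table and record the instance on the domain's attached list.
 */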
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
				     struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct msm_priv *priv;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	if (domain == identity_domain || !domain)
		return 0;

	priv = to_msm_priv(domain);
	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static struct iommu_domain_ops msm_iommu_identity_ops = {
	.attach_dev = msm_iommu_identity_attach,
};

static struct iommu_domain msm_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &msm_iommu_identity_ops,
};

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
				   GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			      size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
	return 0;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	size_t ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

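/*
 * Translate an IOVA by running the hardware V2P (VA-to-PA) probe on the
 * first attached context bank and decoding the resulting PAR value.
 */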
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

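/*
 * Record a stream ID from an of_xlate specifier on the device's master
 * descriptor, allocating the descriptor on first use.
 */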
static int insert_iommu_master(struct device *dev,
			       struct msm_iommu_dev **iommu,
			       const struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return -ENOMEM;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
			return 0;
		}

	master->mids[master->num_mids++] = spec->args[0];
	return 0;
}

static int qcom_iommu_of_xlate(struct device *dev,
			       const struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}
	}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	ret = insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

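/* Report any faulting context banks and clear their fault status */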
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.identity_domain = &msm_iommu_identity_domain,
	.domain_alloc_paging = msm_iommu_domain_alloc_paging,
	.probe_device = msm_iommu_probe_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = msm_iommu_attach_dev,
		.map_pages = msm_iommu_map,
		.unmap_pages = msm_iommu_unmap,
		/*
		 * Nothing is needed here, the barrier to guarantee
		 * completion of the TLB sync operation is implicitly
		 * taken care of when the iommu client does a writel before
		 * kick-starting the other master.
		 */
		.iotlb_sync = NULL,
		.iotlb_sync_map = msm_iommu_sync_map,
		.iova_to_phys = msm_iommu_iova_to_phys,
		.free = msm_iommu_domain_free,
	}
};

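/*
 * Acquire clocks, registers and the IRQ, sanity-check the hardware with
 * a V2P probe of address zero, then register with the IOMMU core.
 */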
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk))
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
				     "could not get smmu_pclk\n");

	ret = clk_prepare(iommu->pclk);
	if (ret)
		return dev_err_probe(iommu->dev, ret,
				     "could not prepare smmu_pclk\n");

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
				     "could not get iommu_clk\n");
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static void msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);