Revert "tty: hvc: Fix data abort due to race in hvc_open"
[linux/fpc-iii.git] / drivers / iommu / qcom_iommu.c
blob5b3b270972f809c62eb11714613e53e75fc63320
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS	0x2000

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
	/* IOMMU core code handle */
	struct iommu_device	 iommu;
	struct device		*dev;
	struct clk		*iface_clk;
	struct clk		*bus_clk;
	void __iomem		*local_base;
	u32			 sec_id;
	u8			 num_ctxs;
	struct qcom_iommu_ctx	*ctxs[];	/* indexed by asid-1 */
};

struct qcom_iommu_ctx {
	struct device		*dev;
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;		/* asid and ctx bank # are 1:1 */
	struct iommu_domain	*domain;
};

struct qcom_iommu_domain {
	struct io_pgtable_ops	*pgtbl_ops;
	spinlock_t		 pgtbl_lock;
	struct mutex		 init_mutex;	/* Protects iommu pointer */
	struct iommu_domain	 domain;
	struct qcom_iommu_dev	*iommu;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &qcom_iommu_ops)
		return NULL;

	return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	if (!qcom_iommu)
		return NULL;
	return qcom_iommu->ctxs[asid - 1];
}

static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
	writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
	writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
	return readq_relaxed(ctx->base + reg);
}

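/*
 * Wait for outstanding TLB invalidations to complete: kick TLBSYNC in
 * every context bank used by the cookie's device, then poll TLBSTATUS
 * until the sync drains (or report a timeout).
 */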
static void qcom_iommu_tlb_sync(void *cookie)
{
	struct iommu_fwspec *fwspec;
	struct device *dev = cookie;
	unsigned i;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		unsigned int val, ret;

		iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

		ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
					 (val & 0x1) == 0, 0, 5000000);
		if (ret)
			dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
	}
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
	struct device *dev = cookie;
	struct iommu_fwspec *fwspec;
	unsigned i;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
	}

	qcom_iommu_tlb_sync(cookie);
}

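/*
 * Issue a TLB invalidate for each page of the range: the value written
 * carries the page-aligned VA in its upper bits with the context's ASID
 * folded into the low bits, as the SMMU's TLBIVA/TLBIVAL format expects.
 */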
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					    size_t granule, bool leaf, void *cookie)
{
	struct device *dev = cookie;
	struct iommu_fwspec *fwspec;
	unsigned i, reg;

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	fwspec = dev_iommu_fwspec_get(dev);

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
		size_t s = size;

		iova = (iova >> 12) << 12;
		iova |= ctx->asid;
		do {
			iommu_writel(ctx, reg, iova);
			iova += granule;
		} while (s -= granule);
	}
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				      size_t granule, void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
	qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t granule,
				    void *cookie)
{
	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops qcom_flush_ops = {
	.tlb_flush_all	= qcom_iommu_tlb_inv_context,
	.tlb_flush_walk = qcom_iommu_tlb_flush_walk,
	.tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
	.tlb_add_page	= qcom_iommu_tlb_add_page,
};

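/*
 * Context fault handler: read the fault status, syndrome and faulting
 * address, give the client a chance to handle it via report_iommu_fault(),
 * then clear the fault and terminate the stalled transaction.
 */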
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
	struct qcom_iommu_ctx *ctx = dev;
	u32 fsr, fsynr;
	u64 iova;

	fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
		dev_err_ratelimited(ctx->dev,
				    "Unhandled context fault: fsr=0x%x, "
				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
				    fsr, iova, fsynr, ctx->asid);
	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
	iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

	return IRQ_HANDLED;
}

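/*
 * Finalize the domain on first attach: allocate the io-pgtable, then
 * program the TTBRs, TCR, MAIRs and SCTLR of every context bank the
 * master uses.  The secure world (via SCM) must restore the SMMU config
 * for each context bank before we touch it for the first time.
 */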
static int qcom_iommu_init_domain(struct iommu_domain *domain,
				  struct qcom_iommu_dev *qcom_iommu,
				  struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	int i, ret = 0;
	u32 reg;

	mutex_lock(&qcom_domain->init_mutex);
	if (qcom_domain->iommu)
		goto out_unlock;

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= qcom_iommu_ops.pgsize_bitmap,
		.ias		= 32,
		.oas		= 40,
		.tlb		= &qcom_flush_ops,
		.iommu_dev	= qcom_iommu->dev,
	};

	qcom_domain->iommu = qcom_iommu;
	pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
	if (!pgtbl_ops) {
		dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
		ret = -ENOMEM;
		goto out_clear_iommu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
	domain->geometry.force_aperture = true;

	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		if (!ctx->secure_init) {
			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
			if (ret) {
				dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
				goto out_clear_iommu;
			}
			ctx->secure_init = true;
		}

		/* TTBRs */
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
			     FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
		iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

		/* TCR */
		iommu_writel(ctx, ARM_SMMU_CB_TCR2,
			     arm_smmu_lpae_tcr2(&pgtbl_cfg));
		iommu_writel(ctx, ARM_SMMU_CB_TCR,
			     arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

		/* MAIRs (stage-1 only) */
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair);
		iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
			     pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

		/* SCTLR */
		reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
		      ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
		      ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
		      ARM_SMMU_SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= ARM_SMMU_SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	qcom_domain->pgtbl_ops = pgtbl_ops;

	return 0;

out_clear_iommu:
	qcom_domain->iommu = NULL;
out_unlock:
	mutex_unlock(&qcom_domain->init_mutex);
	return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
	struct qcom_iommu_domain *qcom_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
	if (!qcom_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&qcom_domain->domain)) {
		kfree(qcom_domain);
		return NULL;
	}

	mutex_init(&qcom_domain->init_mutex);
	spin_lock_init(&qcom_domain->pgtbl_lock);

	return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

	iommu_put_dma_cookie(domain);

	if (qcom_domain->iommu) {
		/*
		 * NOTE: unmap can be called after client device is powered
		 * off, for example, with GPUs or anything involving dma-buf.
		 * So we cannot rely on the device_link.  Make sure the IOMMU
		 * is on to avoid unclocked accesses in the TLB inv path:
		 */
		pm_runtime_get_sync(qcom_domain->iommu->dev);
		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
		pm_runtime_put_sync(qcom_domain->iommu->dev);
	}

	kfree(qcom_domain);
}

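/*
 * Attach runs with the IOMMU powered just long enough to finalize the
 * domain; page-table updates themselves don't touch the SMMU registers,
 * so only the TLB-maintenance paths need the clocks afterwards.
 */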
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	int ret;

	if (!qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalized */
	pm_runtime_get_sync(qcom_iommu->dev);
	ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
	pm_runtime_put_sync(qcom_iommu->dev);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different IOMMUs.
	 */
	if (qcom_domain->iommu != qcom_iommu) {
		dev_err(dev, "cannot attach to IOMMU %s while already "
			"attached to domain on IOMMU %s\n",
			dev_name(qcom_domain->iommu->dev),
			dev_name(qcom_iommu->dev));
		return -EINVAL;
	}

	return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	unsigned i;

	if (WARN_ON(!qcom_domain->iommu))
		return;

	pm_runtime_get_sync(qcom_iommu->dev);
	for (i = 0; i < fwspec->num_ids; i++) {
		struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);

		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
}

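/*
 * Map/unmap call into the io-pgtable code under pgtbl_lock; unmap
 * additionally powers the IOMMU so the resulting TLB invalidations
 * don't hit an unclocked block (see the NOTE in qcom_iommu_unmap()).
 */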
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	int ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	size_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	/* NOTE: unmap can be called after client device is powered off,
	 * for example, with GPUs or anything involving dma-buf.  So we
	 * cannot rely on the device_link.  Make sure the IOMMU is on to
	 * avoid unclocked accesses in the TLB inv path:
	 */
	pm_runtime_get_sync(qcom_domain->iommu->dev);
	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size, gather);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
	pm_runtime_put_sync(qcom_domain->iommu->dev);

	return ret;
}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
						  struct io_pgtable, ops);
	if (!qcom_domain->pgtbl_ops)
		return;

	pm_runtime_get_sync(qcom_domain->iommu->dev);
	qcom_iommu_tlb_sync(pgtable->cookie);
	pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
	struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

	return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int qcom_iommu_add_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
	struct iommu_group *group;
	struct device_link *link;

	if (!qcom_iommu)
		return -ENODEV;

	/*
	 * Establish the link between iommu and master, so that the
	 * iommu gets runtime enabled/disabled as per the master's
	 * needs.
	 */
	link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
			dev_name(qcom_iommu->dev), dev_name(dev));
		return -ENODEV;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	iommu_device_link(&qcom_iommu->iommu, dev);

	return 0;
}

static void qcom_iommu_remove_device(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);

	if (!qcom_iommu)
		return;

	iommu_device_unlink(&qcom_iommu->iommu, dev);
	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

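/*
 * Translate an "iommus = <&iommu asid>" device-tree reference into a
 * fwspec ID: the single phandle argument is the ASID (== context bank)
 * the master transacts through.
 */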
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct qcom_iommu_dev *qcom_iommu;
	struct platform_device *iommu_pdev;
	unsigned asid = args->args[0];

	if (args->args_count != 1) {
		dev_err(dev, "incorrect number of iommu params found for %s "
			"(found %d, expected 1)\n",
			args->np->full_name, args->args_count);
		return -EINVAL;
	}

	iommu_pdev = of_find_device_by_node(args->np);
	if (WARN_ON(!iommu_pdev))
		return -EINVAL;

	qcom_iommu = platform_get_drvdata(iommu_pdev);

	/* make sure the asid specified in dt is valid, so we don't have
	 * to sanity check this elsewhere, since 'asid - 1' is used to
	 * index into qcom_iommu->ctxs:
	 */
	if (WARN_ON(asid < 1) ||
	    WARN_ON(asid > qcom_iommu->num_ctxs))
		return -EINVAL;

	if (!dev_iommu_priv_get(dev)) {
		dev_iommu_priv_set(dev, qcom_iommu);
	} else {
		/* make sure devices iommus dt node isn't referring to
		 * multiple different iommu devices.  Multiple context
		 * banks are ok, but multiple devices are not:
		 */
		if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
			return -EINVAL;
	}

	return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
	.capable	= qcom_iommu_capable,
	.domain_alloc	= qcom_iommu_domain_alloc,
	.domain_free	= qcom_iommu_domain_free,
	.attach_dev	= qcom_iommu_attach_dev,
	.detach_dev	= qcom_iommu_detach_dev,
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
	.device_group	= generic_device_group,
	.of_xlate	= qcom_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	int ret;

	ret = clk_prepare_enable(qcom_iommu->iface_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n");
		return ret;
	}

	ret = clk_prepare_enable(qcom_iommu->bus_clk);
	if (ret) {
		dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n");
		clk_disable_unprepare(qcom_iommu->iface_clk);
		return ret;
	}

	return 0;
}

static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu)
{
	clk_disable_unprepare(qcom_iommu->bus_clk);
	clk_disable_unprepare(qcom_iommu->iface_clk);
}

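/*
 * For secure context banks the page-table memory is owned by the secure
 * world: allocate a chunk of the size TZ asks for and hand it over via
 * SCM.  This only needs to happen once, hence the static flag.
 */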
static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
	size_t psize = 0;
	unsigned int spare = 0;
	void *cpu_addr;
	dma_addr_t paddr;
	unsigned long attrs;
	static bool allocated = false;
	int ret;

	if (allocated)
		return 0;

	ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
	if (ret) {
		dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
			ret);
		return ret;
	}

	dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

	attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
	if (!cpu_addr) {
		dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
			psize);
		return -ENOMEM;
	}

	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
	if (ret) {
		dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
		goto free_mem;
	}

	allocated = true;
	return 0;

free_mem:
	dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
	return ret;
}

static int get_asid(const struct device_node *np)
{
	u32 reg;

	/* read the "reg" property directly to get the relative address
	 * of the context bank, and calculate the asid from that:
	 */
	if (of_property_read_u32_index(np, "reg", 0, &reg))
		return -ENODEV;

	return reg / 0x1000;	/* context banks are 0x1000 apart */
}

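/*
 * Each context bank is probed as a child platform device: map its
 * registers, install the fault IRQ, derive the ASID from the "reg"
 * offset, and register the context with the parent IOMMU device.
 */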
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
	struct qcom_iommu_ctx *ctx;
	struct device *dev = &pdev->dev;
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
	struct resource *res;
	int ret, irq;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	platform_set_drvdata(pdev, ctx);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	/* clear IRQs before registering fault handler, just in case the
	 * boot-loader left us a surprise:
	 */
	iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

	ret = devm_request_irq(dev, irq,
			       qcom_iommu_fault,
			       IRQF_SHARED,
			       "qcom-iommu-fault",
			       ctx);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u\n", irq);
		return ret;
	}

	ret = get_asid(dev->of_node);
	if (ret < 0) {
		dev_err(dev, "missing reg property\n");
		return ret;
	}

	ctx->asid = ret;

	dev_dbg(dev, "found asid %u\n", ctx->asid);

	qcom_iommu->ctxs[ctx->asid - 1] = ctx;

	return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
	struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	qcom_iommu->ctxs[ctx->asid - 1] = NULL;

	return 0;
}

static const struct of_device_id ctx_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1-ns" },
	{ .compatible = "qcom,msm-iommu-v1-sec" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
	.driver	= {
		.name		= "qcom-iommu-ctx",
		.of_match_table	= of_match_ptr(ctx_of_match),
	},
	.probe	= qcom_iommu_ctx_probe,
	.remove = qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
	struct device_node *child;

	for_each_child_of_node(qcom_iommu->dev->of_node, child)
		if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec"))
			return true;

	return false;
}

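/*
 * Probe the top-level IOMMU: size the ctxs[] array from the children's
 * ASIDs, grab the clocks and secure ID, optionally set up the secure
 * page-table pool, then populate the context-bank children and register
 * with the IOMMU core.
 */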
static int qcom_iommu_device_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct qcom_iommu_dev *qcom_iommu;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, max_asid = 0;

	/* find the max asid (which is 1:1 to ctx bank idx), so we know how
	 * many child ctx devices we have:
	 */
	for_each_child_of_node(dev->of_node, child)
		max_asid = max(max_asid, get_asid(child));

	qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
				  GFP_KERNEL);
	if (!qcom_iommu)
		return -ENOMEM;
	qcom_iommu->num_ctxs = max_asid;
	qcom_iommu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		qcom_iommu->local_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(qcom_iommu->local_base))
			return PTR_ERR(qcom_iommu->local_base);
	}

	qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(qcom_iommu->iface_clk)) {
		dev_err(dev, "failed to get iface clock\n");
		return PTR_ERR(qcom_iommu->iface_clk);
	}

	qcom_iommu->bus_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(qcom_iommu->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		return PTR_ERR(qcom_iommu->bus_clk);
	}

	if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
				 &qcom_iommu->sec_id)) {
		dev_err(dev, "missing qcom,iommu-secure-id property\n");
		return -ENODEV;
	}

	if (qcom_iommu_has_secure_context(qcom_iommu)) {
		ret = qcom_iommu_sec_ptbl_init(dev);
		if (ret) {
			dev_err(dev, "cannot init secure pg table(%d)\n", ret);
			return ret;
		}
	}

	platform_set_drvdata(pdev, qcom_iommu);

	pm_runtime_enable(dev);

	/* register context bank devices, which are child nodes: */
	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err(dev, "Failed to populate iommu contexts\n");
		return ret;
	}

	ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
				     dev_name(dev));
	if (ret) {
		dev_err(dev, "Failed to register iommu in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&qcom_iommu->iommu, &qcom_iommu_ops);
	iommu_device_set_fwnode(&qcom_iommu->iommu, dev->fwnode);

	ret = iommu_device_register(&qcom_iommu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

	bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

	if (qcom_iommu->local_base) {
		pm_runtime_get_sync(dev);
		writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
		pm_runtime_put_sync(dev);
	}

	return 0;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
	struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

	bus_set_iommu(&platform_bus_type, NULL);

	pm_runtime_force_suspend(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&qcom_iommu->iommu);
	iommu_device_unregister(&qcom_iommu->iommu);

	return 0;
}

static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	return qcom_iommu_enable_clocks(qcom_iommu);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
	struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

	qcom_iommu_disable_clocks(qcom_iommu);

	return 0;
}

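/*
 * Runtime PM simply gates the interface and bus clocks; system
 * suspend/resume reuses the same hooks via pm_runtime_force_*().
 */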
static const struct dev_pm_ops qcom_iommu_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
	{ .compatible = "qcom,msm-iommu-v1" },
	{ /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
	.driver	= {
		.name		= "qcom-iommu",
		.of_match_table	= of_match_ptr(qcom_iommu_of_match),
		.pm		= &qcom_iommu_pm_ops,
	},
	.probe	= qcom_iommu_device_probe,
	.remove	= qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&qcom_iommu_ctx_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&qcom_iommu_driver);
	if (ret)
		platform_driver_unregister(&qcom_iommu_ctx_driver);

	return ret;
}
device_initcall(qcom_iommu_init);