/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <dt-bindings/memory/mt8173-larb-port.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)

#define REG_MMU_IVRP_PADDR			0x114
#define F_MMU_IVRP_PA_SET(pa, ext)		(((pa) >> 1) | ((!!(ext)) << 31))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TLB_MISS_FAULT			BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	BIT(6)

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134

#define REG_MMU_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_MSK			0xfffff000
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU_INVLD_PA			0x140
#define REG_MMU_INT_ID				0x150
#define F_MMU0_INT_ID_LARB_ID(a)		(((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			128

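/*
 * One mtk_iommu_domain wraps one io_pgtable instance: pgtlock serializes
 * map/unmap/iova_to_phys against the shared ARM short-descriptor page
 * table, and "domain" embeds the generic iommu_domain handed to the core.
 */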
struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */

	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static struct iommu_ops mtk_iommu_ops;

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

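/*
 * TLB maintenance callbacks for io-pgtable. The M4U can invalidate either
 * the whole TLB or an IOVA range; a range invalidation is posted and must
 * be confirmed via REG_MMU_CPE_DONE in the tlb_sync callback below.
 */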
static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the TLB flush is done */
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
					   size_t granule, bool leaf,
					   void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);

	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
}

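/*
 * Wait for a pending range invalidation to complete. On timeout, fall
 * back to a full TLB flush so that no stale entry can survive.
 */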
static void mtk_iommu_tlb_sync(void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
					tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(cookie);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static const struct iommu_gather_ops mtk_iommu_gather_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
	.tlb_sync = mtk_iommu_tlb_sync,
};

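/*
 * Translation-fault interrupt handler: decode the faulting IOVA/PA and the
 * larb/port that issued the access, report the fault, then clear the
 * interrupt and flush the whole TLB.
 */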
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
	fault_port = F_MMU0_INT_ID_PORT_ID(regval);

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

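/*
 * Route a client device's larb/port through (or around) the M4U by setting
 * the per-port enable bit for its larb; the bits are latched in larb_imu[]
 * and applied to the hardware by the SMI larb driver.
 */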
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_iommu_client_priv *head, *cur, *next;
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;

	head = dev->archdata.iommu;
	list_for_each_entry_safe(cur, next, &head->client, client) {
		larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
		portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

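/*
 * Set up the single ARM short-descriptor (v7s) page table that all masters
 * behind this M4U share, and program its base address into the hardware.
 * Called once, on the first attach_dev.
 */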
static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &mtk_iommu_gather_ops,
		.iommu_dev = data->dev,
	};

	if (data->enable_4GB)
		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

	writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
	       data->base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

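/*
 * All masters behind one M4U must live in a single shared domain: the
 * first attach finalises the page table, later attaches only accept the
 * same domain and merely enable the caller's ports.
 */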
static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;
	int ret;

	if (!priv)
		return -ENODEV;

	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	} else if (data->m4u_dom != dom) {
		/* All the client devices should be in the same m4u domain */
		dev_err(dev, "can't attach to a different iommu domain\n");
		return -EPERM;
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;

	if (!priv)
		return;

	data = dev_get_drvdata(priv->m4udev);
	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!dev->archdata.iommu) /* Not an iommu client device */
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
	struct mtk_iommu_client_priv *head, *cur, *next;

	head = dev->archdata.iommu;
	if (!head)
		return;

	list_for_each_entry_safe(cur, next, &head->client, client) {
		list_del(&cur->client);
		kfree(cur);
	}
	kfree(head);
	dev->archdata.iommu = NULL;

	iommu_group_remove_device(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data;
	struct mtk_iommu_client_priv *priv;

	priv = dev->archdata.iommu;
	if (!priv)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	}
	return data->m4u_group;
}

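/*
 * Translate an "iommus" phandle-plus-one-cell specifier into a per-device
 * client list: the first reference allocates the list head pointing at the
 * m4u device, and each reference appends one master (larb/port) ID.
 */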
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct mtk_iommu_client_priv *head, *priv, *next;
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->archdata.iommu) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOMEM;

		dev->archdata.iommu = head;
		INIT_LIST_HEAD(&head->client);
		head->m4udev = &m4updev->dev;
	} else {
		head = dev->archdata.iommu;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto err_free_mem;

	priv->mtk_m4u_id = args->args[0];
	list_add_tail(&priv->client, &head->client);

	return 0;

err_free_mem:
	list_for_each_entry_safe(priv, next, &head->client, client)
		kfree(priv);
	kfree(head);
	dev->archdata.iommu = NULL;
	return -ENOMEM;
}

static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

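/*
 * Bring the M4U hardware up: enable the bus clock, select the
 * translation-fault protect mode, unmask the fault interrupts, point
 * faulting accesses at the protect buffer, and request the IRQ line.
 */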
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
		F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
		       data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
	writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

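/*
 * Probe allocates the 256-byte protect buffer that faulting transactions
 * are steered to (twice MTK_PROTECT_PA_ALIGN so the base can be aligned
 * up), maps the M4U registers, initialises the hardware, then binds all
 * "mediatek,larbs" devices as components of this master.
 */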
static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct component_match *match = NULL;
	void *protect;
	int i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;

	/* Protect memory. HW will access here on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current DRAM is over 4GB */
	data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;
	data->smi_imu.larb_nr = larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode))
			continue;

		plarbdev = of_find_device_by_node(larbnode);
		of_node_put(larbnode);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larbnode, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev)
				return -EPROBE_DEFER;
		}
		data->smi_imu.larb_imu[i].dev = &plarbdev->dev;

		component_match_add(dev, &match, compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	free_io_pgtable_ops(data->m4u_dom->iop);
	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

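/*
 * The M4U register state is not retained across system suspend, so save
 * the mutable configuration registers on suspend and rewrite them (plus
 * the page-table base and protect-buffer address) on resume.
 */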
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
		       base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
		       base + REG_MMU_IVRP_PADDR);
	return 0;
}

const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt8173-m4u", },
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};

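/*
 * Early init hook run from the IOMMU_OF_DECLARE table: create the m4u
 * platform device from its DT node, register the driver, and publish
 * mtk_iommu_ops on the node so client devices can be translated via
 * of_xlate during their probe.
 */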
static int mtk_iommu_init_fn(struct device_node *np)
{
	int ret;
	struct platform_device *pdev;

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (!pdev)
		return -ENOMEM;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		return ret;
	}

	of_iommu_set_ops(np, &mtk_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);