/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <mach/sysmmu.h>

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
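
/*
 * Example of how an IO virtual address decomposes with the macros above:
 * for iova 0x12345678, lv1ent_offset(iova) = 0x123 (iova >> SECT_ORDER)
 * indexes the 4096-entry (16KB) first level table, and
 * lv2ent_offset(iova) = 0x45 indexes the 256-entry (1KB) second level
 * table; a 64KB large page spans SPAGES_PER_LPAGE (16) lv2 entries.
 */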

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058
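
/*
 * Page table walking helpers: section_entry() returns the first level
 * entry for an iova in the given lv1 table, and page_entry() follows a
 * first level page descriptor to the matching second level entry.
 */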
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of page table base. This is 0 if @itype
 *                is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
			unsigned long pgtable_base, unsigned long fault_addr);

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};

struct sysmmu_drvdata {
	struct list_head node; /* entry of exynos_iommu_domain.clients */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *dev;	/* Owner of system MMU */
	char *dbgname;
	int nsfrs;
	void __iomem **sfrbases;
	struct clk *clk[2];
	int activations;
	rwlock_t lock;
	struct iommu_domain *domain;
	sysmmu_fault_handler_t fault_handler;
	unsigned long pgtable;
};
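
/*
 * 'activations' reference-counts enable requests under data->lock: the
 * hardware is actually programmed only on the 0 -> 1 transition and is
 * shut off again on the 1 -> 0 transition.
 */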
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
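
/*
 * sysmmu_block()/sysmmu_unblock() stop and restart address translation
 * around TLB maintenance.  sysmmu_block() polls REG_MMU_STATUS for the
 * blocked bit and, if it does not become set within a bounded number of
 * reads, re-enables the MMU and reports failure.
 */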
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
						unsigned long iova)
{
	__raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				       unsigned long pgd)
{
	__raw_writel(0x1, sfrbase + REG_MMU_CFG);	/* 16KB LV1, LRU */
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
						unsigned long size, int idx)
{
	__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
	__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}

static void __set_fault_handler(struct sysmmu_drvdata *data,
					sysmmu_fault_handler_t handler)
{
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);
	data->fault_handler = handler;
	write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
					sysmmu_fault_handler_t handler)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	__set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
		     unsigned long pgtable_base, unsigned long fault_addr)
{
	unsigned long *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
			sysmmu_fault_name[itype], fault_addr, pgtable_base);

	ent = section_entry(__va(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	}

	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

	BUG();

	return 0;
}
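
/*
 * Interrupt handler: find which SFR region raised the interrupt, decode
 * the fault type from REG_INT_STATUS, and report it first through
 * report_iommu_fault() and then, if unhandled, through the registered
 * fault handler.  The interrupt is cleared only when a handler resolved
 * the fault; the System MMU is unblocked whenever the fault type is known.
 */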
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The SYSMMU is blocked when this interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	struct resource *irqres;
	struct platform_device *pdev;
	enum exynos_sysmmu_inttype itype;
	unsigned long addr = -1;

	int i, ret = -ENOSYS;

	read_lock(&data->lock);

	WARN_ON(!is_sysmmu_active(data));

	pdev = to_platform_device(data->sysmmu);
	for (i = 0; i < (pdev->num_resources / 2); i++) {
		irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (irqres && ((int)irqres->start == irq))
			break;
	}

	if (i == pdev->num_resources) {
		itype = SYSMMU_FAULT_UNKNOWN;
	} else {
		itype = (enum exynos_sysmmu_inttype)
			__ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
		if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
			itype = SYSMMU_FAULT_UNKNOWN;
		else
			addr = __raw_readl(
				data->sfrbases[i] + fault_reg_offset[itype]);
	}

	if (data->domain)
		ret = report_iommu_fault(data->domain, data->dev,
				addr, itype);

	if ((ret == -ENOSYS) && data->fault_handler) {
		unsigned long base = data->pgtable;
		if (itype != SYSMMU_FAULT_UNKNOWN)
			base = __raw_readl(
					data->sfrbases[i] + REG_PT_BASE_ADDR);
		ret = data->fault_handler(itype, base, addr);
	}

	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
		__raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
	else
		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
				data->dbgname, sysmmu_fault_name[itype]);

	if (itype != SYSMMU_FAULT_UNKNOWN)
		sysmmu_unblock(data->sfrbases[i]);

	read_unlock(&data->lock);

	return IRQ_HANDLED;
}

static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;
	bool disabled = false;
	int i;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_inactive(data))
		goto finish;

	for (i = 0; i < data->nsfrs; i++)
		__raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);

	if (data->clk[1])
		clk_disable(data->clk[1]);
	if (data->clk[0])
		clk_disable(data->clk[0]);

	disabled = true;
	data->pgtable = 0;
	data->domain = NULL;
finish:
	write_unlock_irqrestore(&data->lock, flags);

	if (disabled)
		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
	else
		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
					data->dbgname, data->activations);

	return disabled;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns -error if an error occurred and the System MMU was not enabled,
 * 0 if the System MMU has just been enabled, and 1 if it was already
 * enabled before this call.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
			unsigned long pgtable, struct iommu_domain *domain)
{
	int i, ret = 0;
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_active(data)) {
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;
			set_sysmmu_inactive(data);
		} else {
			ret = 1;
		}

		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
		goto finish;
	}

	if (data->clk[0])
		clk_enable(data->clk[0]);
	if (data->clk[1])
		clk_enable(data->clk[1]);

	data->pgtable = pgtable;

	for (i = 0; i < data->nsfrs; i++) {
		__sysmmu_set_ptbase(data->sfrbases[i], pgtable);

		if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
			/* System MMU version is 3.x */
			__raw_writel((1 << 12) | (2 << 28),
					data->sfrbases[i] + REG_MMU_CFG);
			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
			__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
		}

		__raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
	}

	data->domain = domain;

	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
	write_unlock_irqrestore(&data->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0) {
		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
		return ret;
	}

	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
	if (WARN_ON(ret < 0)) {
		pm_runtime_put(data->sysmmu);
		dev_err(data->sysmmu,
			"(%s) Already enabled with page table %#lx\n",
			data->dbgname, data->pgtable);
	} else {
		data->dev = dev;
	}

	return ret;
}

static bool exynos_sysmmu_disable(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	bool disabled;

	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);

	return disabled;
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		int i;
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate_entry(
						data->sfrbases[i], iova);
				sysmmu_unblock(data->sfrbases[i]);
			}
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping invalidating TLB.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		int i;
		for (i = 0; i < data->nsfrs; i++) {
			if (sysmmu_block(data->sfrbases[i])) {
				__sysmmu_tlb_invalidate(data->sfrbases[i]);
				sysmmu_unblock(data->sfrbases[i]);
			}
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping invalidating TLB.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}
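
/*
 * Probe: a System MMU instance exposes pdev->num_resources / 2 SFR regions
 * and as many IRQ lines.  Each region is ioremapped and each IRQ wired to
 * exynos_sysmmu_irq().  The optional platform data carries a debug name and
 * a comma-separated pair of clock names, parsed below into clk[0]/clk[1].
 */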
static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int i, ret;
	struct device *dev;
	struct sysmmu_drvdata *data;

	dev = &pdev->dev;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_dbg(dev, "Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = dev_set_drvdata(dev, data);
	if (ret) {
		dev_dbg(dev, "Unable to initialize driver data\n");
		goto err_init;
	}

	data->nsfrs = pdev->num_resources / 2;
	data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
								GFP_KERNEL);
	if (data->sfrbases == NULL) {
		dev_dbg(dev, "Not enough memory\n");
		ret = -ENOMEM;
		goto err_init;
	}

	for (i = 0; i < data->nsfrs; i++) {
		struct resource *res;
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			dev_dbg(dev, "Unable to find IOMEM region\n");
			ret = -ENOENT;
			goto err_res;
		}

		data->sfrbases[i] = ioremap(res->start, resource_size(res));
		if (!data->sfrbases[i]) {
			dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
							res->start);
			ret = -ENOENT;
			goto err_res;
		}
	}

	for (i = 0; i < data->nsfrs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			dev_dbg(dev, "Unable to find IRQ resource\n");
			goto err_irq;
		}

		ret = request_irq(ret, exynos_sysmmu_irq, 0,
					dev_name(dev), data);
		if (ret) {
			dev_dbg(dev, "Unable to register interrupt handler\n");
			goto err_irq;
		}
	}

	if (dev_get_platdata(dev)) {
		char *deli, *beg;
		struct sysmmu_platform_data *platdata = dev_get_platdata(dev);

		beg = platdata->clockname;

		for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
			/* NOTHING */;

		if (*deli == '\0')
			deli = NULL;
		else
			*deli = '\0';

		data->clk[0] = clk_get(dev, beg);
		if (IS_ERR(data->clk[0])) {
			data->clk[0] = NULL;
			dev_dbg(dev, "No clock descriptor registered\n");
		}

		if (data->clk[0] && deli) {
			*deli = ',';
			data->clk[1] = clk_get(dev, deli + 1);
			if (IS_ERR(data->clk[1]))
				data->clk[1] = NULL;
		}

		data->dbgname = platdata->dbgname;
	}

	data->sysmmu = dev;
	rwlock_init(&data->lock);
	INIT_LIST_HEAD(&data->node);

	__set_fault_handler(data, &default_fault_handler);

	if (dev->parent)
		pm_runtime_enable(dev);

	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
	return 0;
err_irq:
	while (i-- > 0) {
		int irq;

		irq = platform_get_irq(pdev, i);
		free_irq(irq, data);
	}
err_res:
	while (data->nsfrs-- > 0)
		iounmap(data->sfrbases[data->nsfrs]);
	kfree(data->sfrbases);
err_init:
	kfree(data);
err_alloc:
	dev_err(dev, "Failed to initialize\n");
	return ret;
}

static struct platform_driver exynos_sysmmu_driver = {
	.probe		= exynos_sysmmu_probe,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= "exynos-sysmmu",
	}
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
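
/*
 * An iommu_domain owns one 16KB first level page table (an order-2 page
 * allocation holding NUM_LV1ENTRIES entries) and an order-1 allocation of
 * per-section counters of free second level entries; both are zeroed, and
 * the page table is flushed to memory before the hardware may walk it.
 */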
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (unsigned long *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end	= ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) {
		while (!exynos_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kfree(__va(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(data->sysmmu);
	if (ret < 0)
		return ret;

	ret = 0;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);

	if (ret == 0) {
		/* 'data->node' must not already be in priv->clients */
		BUG_ON(!list_empty(&data->node));
		data->dev = dev;
		list_add_tail(&data->node, &priv->clients);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
				__func__, __pa(priv->pgtable));
		pm_runtime_put(data->sysmmu);
	} else if (ret > 0) {
		dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
					__func__, __pa(priv->pgtable));
	} else {
		dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
					__func__, __pa(priv->pgtable));
	}

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	struct exynos_iommu_domain *priv = domain->priv;
	struct list_head *pos;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each(pos, &priv->clients) {
		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
			found = true;
			break;
		}
	}

	if (!found)
		goto finish;

	if (__exynos_sysmmu_disable(data)) {
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
					__func__, __pa(priv->pgtable));
		list_del_init(&data->node);

	} else {
		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
					__func__, __pa(priv->pgtable));
	}

finish:
	spin_unlock_irqrestore(&priv->lock, flags);

	if (found)
		pm_runtime_put(data->sysmmu);
}

static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
					short *pgcounter)
{
	if (lv1ent_fault(sent)) {
		unsigned long *pent;

		pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return NULL;

		*sent = mk_lv1ent_page(__pa(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
	}

	return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent))
		return -EADDRINUSE;

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES)
			return -EADDRINUSE;

		kfree(page_entry(sent, 0));

		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (!lv2ent_fault(pent))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (!lv2ent_fault(pent)) {
				memset(pent, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
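
/*
 * map/unmap work on exactly one of the sizes advertised in pgsize_bitmap:
 * a 1MB section in the first level table, a 64KB large page written as 16
 * identical second level entries, or a single 4KB small page.
 */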
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		unsigned long *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (!pent)
			ret = -ENOMEM;
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret) {
		pr_debug("%s: Failed to map iova 0x%lx/0x%zx bytes\n",
							__func__, iova, size);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
					       unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	unsigned long *ent;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		BUG_ON(size < SECT_SIZE);

		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	BUG_ON(size < LPAGE_SIZE);

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(data, &priv->clients, node)
		sysmmu_tlb_invalidate_entry(data->dev, iova);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_init = &exynos_iommu_domain_init,
	.domain_destroy = &exynos_iommu_domain_destroy,
	.attach_dev = &exynos_iommu_attach_device,
	.detach_dev = &exynos_iommu_detach_device,
	.map = &exynos_iommu_map,
	.unmap = &exynos_iommu_unmap,
	.iova_to_phys = &exynos_iommu_iova_to_phys,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};

static int __init exynos_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&exynos_sysmmu_driver);

	if (ret == 0)
		bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);

	return ret;
}
subsys_initcall(exynos_iommu_init);