/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <mach/sysmmu.h>
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
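
/*
 * Worked example of the address split done by the macros above, for an
 * arbitrary illustrative iova of 0x12345678:
 *
 *      lv1ent_offset(0x12345678) = 0x12345678 >> 20             = 0x123
 *      lv2ent_offset(0x12345678) = (0x12345678 & 0xFF000) >> 12 = 0x45
 *      spage_offs(0x12345678)    = 0x12345678 & 0xFFF           = 0x678
 *
 * i.e. bits [31:20] index the 4096-entry lv1 table, bits [19:12] index a
 * 256-entry lv2 table and bits [11:0] are the offset within a small page.
 */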
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

#define REG_MMU_VERSION 0x034

#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058

static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        return pgtable + lv1ent_offset(iova);
}

static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
        SYSMMU_PAGEFAULT,
        SYSMMU_AR_MULTIHIT,
        SYSMMU_AW_MULTIHIT,
        SYSMMU_BUSERROR,
        SYSMMU_AR_SECURITY,
        SYSMMU_AR_ACCESS,
        SYSMMU_AW_SECURITY,
        SYSMMU_AW_PROTECTION, /* 7 */
        SYSMMU_FAULT_UNKNOWN,
        SYSMMU_FAULTS_NUM
};
/*
 * @itype: type of fault.
 * @pgtable_base: the physical address of page table base. This is 0 if @itype
 *                is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *              translate. This is 0 if @itype is SYSMMU_BUSERROR.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
                        unsigned long pgtable_base, unsigned long fault_addr);
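
/*
 * Usage sketch (illustrative only; "my_fault_handler" is a made-up name, not
 * part of this driver): a master device driver can replace the default fault
 * handler below via exynos_sysmmu_set_fault_handler().  If
 * report_iommu_fault() does not handle the fault, the installed handler runs
 * from exynos_sysmmu_irq(); returning 0 marks the fault as handled so the
 * interrupt is cleared, while a non-zero return leaves it logged as not
 * handled.
 *
 *      static int my_fault_handler(enum exynos_sysmmu_inttype itype,
 *                      unsigned long pgtable_base, unsigned long fault_addr)
 *      {
 *              pr_err("System MMU fault %d at %#lx (pgtable %#lx)\n",
 *                              itype, fault_addr, pgtable_base);
 *              return 0;
 *      }
 *
 *      exynos_sysmmu_set_fault_handler(dev, my_fault_handler);
 */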
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
        REG_PAGE_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_DEFAULT_SLAVE_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AR_FAULT_ADDR,
        REG_AW_FAULT_ADDR,
        REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "AR MULTI-HIT FAULT",
        "AW MULTI-HIT FAULT",
        "BUS ERROR",
        "AR SECURITY PROTECTION FAULT",
        "AR ACCESS PROTECTION FAULT",
        "AW SECURITY PROTECTION FAULT",
        "AW ACCESS PROTECTION FAULT",
        "UNKNOWN FAULT"
};

struct exynos_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 16KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
struct sysmmu_drvdata {
        struct list_head node; /* entry of exynos_iommu_domain.clients */
        struct device *sysmmu; /* System MMU's device descriptor */
        struct device *dev; /* Owner of system MMU */
        char *dbgname;
        int nsfrs;
        void __iomem **sfrbases;
        struct clk *clk[2];
        int activations;
        rwlock_t lock;
        struct iommu_domain *domain;
        sysmmu_fault_handler_t fault_handler;
        unsigned long pgtable;
};
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU was not active previously
           and it needs to be initialized */
        return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
        /* return true if the System MMU needs to be disabled */
        BUG_ON(data->activations < 1);
        return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}
static void sysmmu_unblock(void __iomem *sfrbase)
{
        __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
        int i = 120;

        __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
        while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
                --i;

        if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
                sysmmu_unblock(sfrbase);
                return false;
        }

        return true;
}

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
        __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
                                                unsigned long iova)
{
        __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
                                        unsigned long pgd)
{
        __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
        __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

        __sysmmu_tlb_invalidate(sfrbase);
}

static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
                                                unsigned long size, int idx)
{
        __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
        __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}
void exynos_sysmmu_set_prefbuf(struct device *dev,
                                unsigned long base0, unsigned long size0,
                                unsigned long base1, unsigned long size1)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        unsigned long flags;
        int i;

        BUG_ON((base0 + size0) <= base0);
        BUG_ON((size1 > 0) && ((base1 + size1) <= base1));

        read_lock_irqsave(&data->lock, flags);
        if (!is_sysmmu_active(data))
                goto finish;

        for (i = 0; i < data->nsfrs; i++) {
                if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
                        if (!sysmmu_block(data->sfrbases[i]))
                                continue;

                        if (size1 == 0) {
                                if (size0 <= SZ_128K) {
                                        base1 = base0;
                                        size1 = size0;
                                } else {
                                        size1 = size0 -
                                                ALIGN(size0 / 2, SZ_64K);
                                        size0 = size0 - size1;
                                        base1 = base0 + size0;
                                }
                        }

                        __sysmmu_set_prefbuf(
                                        data->sfrbases[i], base0, size0, 0);
                        __sysmmu_set_prefbuf(
                                        data->sfrbases[i], base1, size1, 1);

                        sysmmu_unblock(data->sfrbases[i]);
                }
        }
finish:
        read_unlock_irqrestore(&data->lock, flags);
}
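
/*
 * Usage sketch (illustrative only; the iova/size variables are made up): an
 * owning device driver with two DMA streams on a System MMU 3.x instance can
 * dedicate one prefetch buffer to each stream.  Passing size1 == 0 instead
 * lets the code above split the first region across both prefetch buffers.
 *
 *      exynos_sysmmu_set_prefbuf(dev, src_iova, src_bytes,
 *                                     dst_iova, dst_bytes);
 */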
static void __set_fault_handler(struct sysmmu_drvdata *data,
                                        sysmmu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_set_fault_handler(struct device *dev,
                                        sysmmu_fault_handler_t handler)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        __set_fault_handler(data, handler);
}

static int default_fault_handler(enum exynos_sysmmu_inttype itype,
                        unsigned long pgtable_base, unsigned long fault_addr)
{
        unsigned long *ent;

        if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
                itype = SYSMMU_FAULT_UNKNOWN;

        pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
                        sysmmu_fault_name[itype], fault_addr, pgtable_base);

        ent = section_entry(__va(pgtable_base), fault_addr);
        pr_err("\tLv1 entry: 0x%lx\n", *ent);

        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
                pr_err("\t Lv2 entry: 0x%lx\n", *ent);
        }

        pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

        BUG();

        return 0;
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
        /* The SYSMMU is in the blocked state while this interrupt is handled. */
        struct sysmmu_drvdata *data = dev_id;
        struct resource *irqres;
        struct platform_device *pdev;
        enum exynos_sysmmu_inttype itype;
        unsigned long addr = -1;

        int i, ret = -ENOSYS;

        read_lock(&data->lock);

        WARN_ON(!is_sysmmu_active(data));

        pdev = to_platform_device(data->sysmmu);
        for (i = 0; i < (pdev->num_resources / 2); i++) {
                irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (irqres && ((int)irqres->start == irq))
                        break;
        }

        if (i == pdev->num_resources) {
                itype = SYSMMU_FAULT_UNKNOWN;
        } else {
                itype = (enum exynos_sysmmu_inttype)
                        __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
                if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
                        itype = SYSMMU_FAULT_UNKNOWN;
                else
                        addr = __raw_readl(
                                data->sfrbases[i] + fault_reg_offset[itype]);
        }

        if (data->domain)
                ret = report_iommu_fault(data->domain, data->dev,
                                addr, itype);

        if ((ret == -ENOSYS) && data->fault_handler) {
                unsigned long base = data->pgtable;
                if (itype != SYSMMU_FAULT_UNKNOWN)
                        base = __raw_readl(
                                        data->sfrbases[i] + REG_PT_BASE_ADDR);
                ret = data->fault_handler(itype, base, addr);
        }

        if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
                __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
        else
                dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
                                data->dbgname, sysmmu_fault_name[itype]);

        if (itype != SYSMMU_FAULT_UNKNOWN)
                sysmmu_unblock(data->sfrbases[i]);

        read_unlock(&data->lock);

        return IRQ_HANDLED;
}
static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
        unsigned long flags;
        bool disabled = false;
        int i;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_inactive(data))
                goto finish;

        for (i = 0; i < data->nsfrs; i++)
                __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);

        if (data->clk[1])
                clk_disable(data->clk[1]);
        if (data->clk[0])
                clk_disable(data->clk[0]);

        disabled = true;
        data->pgtable = 0;
        data->domain = NULL;
finish:
        write_unlock_irqrestore(&data->lock, flags);

        if (disabled)
                dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
        else
                dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
                                        data->dbgname, data->activations);

        return disabled;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
                        unsigned long pgtable, struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_active(data)) {
                if (WARN_ON(pgtable != data->pgtable)) {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                } else {
                        ret = 1;
                }

                dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
                goto finish;
        }

        if (data->clk[0])
                clk_enable(data->clk[0]);
        if (data->clk[1])
                clk_enable(data->clk[1]);

        data->pgtable = pgtable;

        for (i = 0; i < data->nsfrs; i++) {
                __sysmmu_set_ptbase(data->sfrbases[i], pgtable);

                if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
                        /* System MMU version is 3.x */
                        __raw_writel((1 << 12) | (2 << 28),
                                        data->sfrbases[i] + REG_MMU_CFG);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
                        __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
                }

                __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
        }

        data->domain = domain;

        dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}
int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        int ret;

        BUG_ON(!memblock_is_memory(pgtable));

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0) {
                dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
                return ret;
        }

        ret = __exynos_sysmmu_enable(data, pgtable, NULL);
        if (WARN_ON(ret < 0)) {
                pm_runtime_put(data->sysmmu);
                dev_err(data->sysmmu,
                        "(%s) Already enabled with page table %#lx\n",
                        data->dbgname, data->pgtable);
        } else {
                data->dev = dev;
        }

        return ret;
}

static bool exynos_sysmmu_disable(struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        bool disabled;

        disabled = __exynos_sysmmu_disable(data);
        pm_runtime_put(data->sysmmu);

        return disabled;
}
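
/*
 * Note on nesting: enable and disable calls are reference counted through
 * data->activations.  __exynos_sysmmu_enable() programs the hardware only on
 * the 0 -> 1 transition and returns 1 for further nested calls with the same
 * page table (or -EBUSY with a different one); __exynos_sysmmu_disable()
 * actually switches the block off only when the count drops back to 0.
 * Example trace for two nested enables with the same page table:
 *
 *      __exynos_sysmmu_enable(data, pgd, dom)  -> 0     (hardware programmed)
 *      __exynos_sysmmu_enable(data, pgd, dom)  -> 1     (already enabled)
 *      __exynos_sysmmu_disable(data)           -> false (still active)
 *      __exynos_sysmmu_disable(data)           -> true  (hardware disabled)
 */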
static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate_entry(
                                                data->sfrbases[i], iova);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping invalidating TLB.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}

void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
        unsigned long flags;
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

        read_lock_irqsave(&data->lock, flags);

        if (is_sysmmu_active(data)) {
                int i;
                for (i = 0; i < data->nsfrs; i++) {
                        if (sysmmu_block(data->sfrbases[i])) {
                                __sysmmu_tlb_invalidate(data->sfrbases[i]);
                                sysmmu_unblock(data->sfrbases[i]);
                        }
                }
        } else {
                dev_dbg(data->sysmmu,
                        "(%s) Disabled. Skipping invalidating TLB.\n",
                        data->dbgname);
        }

        read_unlock_irqrestore(&data->lock, flags);
}
static int exynos_sysmmu_probe(struct platform_device *pdev)
{
        int i, ret;
        struct device *dev;
        struct sysmmu_drvdata *data;

        dev = &pdev->dev;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = dev_set_drvdata(dev, data);
        if (ret) {
                dev_dbg(dev, "Unable to initialize driver data\n");
                goto err_init;
        }

        data->nsfrs = pdev->num_resources / 2;
        data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
                                                                GFP_KERNEL);
        if (data->sfrbases == NULL) {
                dev_dbg(dev, "Not enough memory\n");
                ret = -ENOMEM;
                goto err_init;
        }

        for (i = 0; i < data->nsfrs; i++) {
                struct resource *res;
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res) {
                        dev_dbg(dev, "Unable to find IOMEM region\n");
                        ret = -ENOENT;
                        goto err_res;
                }

                data->sfrbases[i] = ioremap(res->start, resource_size(res));
                if (!data->sfrbases[i]) {
                        dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
                                                        res->start);
                        ret = -ENOENT;
                        goto err_res;
                }
        }

        for (i = 0; i < data->nsfrs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        dev_dbg(dev, "Unable to find IRQ resource\n");
                        goto err_irq;
                }

                ret = request_irq(ret, exynos_sysmmu_irq, 0,
                                        dev_name(dev), data);
                if (ret) {
                        dev_dbg(dev, "Unable to register interrupt handler\n");
                        goto err_irq;
                }
        }
        if (dev_get_platdata(dev)) {
                char *deli, *beg;
                struct sysmmu_platform_data *platdata = dev_get_platdata(dev);

                beg = platdata->clockname;

                for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
                        /* NOTHING */;

                if (*deli == '\0')
                        deli = NULL;
                else
                        *deli = '\0';

                data->clk[0] = clk_get(dev, beg);
                if (IS_ERR(data->clk[0])) {
                        data->clk[0] = NULL;
                        dev_dbg(dev, "No clock descriptor registered\n");
                }

                if (data->clk[0] && deli) {
                        *deli = ',';
                        data->clk[1] = clk_get(dev, deli + 1);
                        if (IS_ERR(data->clk[1]))
                                data->clk[1] = NULL;
                }

                data->dbgname = platdata->dbgname;
        }

        data->sysmmu = dev;
        rwlock_init(&data->lock);
        INIT_LIST_HEAD(&data->node);

        __set_fault_handler(data, &default_fault_handler);

        if (dev->parent)
                pm_runtime_enable(dev);

        dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
        return 0;
err_irq:
        while (i-- > 0) {
                int irq;

                irq = platform_get_irq(pdev, i);
                free_irq(irq, data);
        }
err_res:
        while (data->nsfrs-- > 0)
                iounmap(data->sfrbases[data->nsfrs]);
        kfree(data->sfrbases);
err_init:
        kfree(data);
err_alloc:
        dev_err(dev, "Failed to initialize\n");
        return ret;
}
static struct platform_driver exynos_sysmmu_driver = {
        .probe = exynos_sysmmu_probe,
        .driver = {
                .owner = THIS_MODULE,
                .name = "exynos-sysmmu",
        }
};

static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),
                                virt_to_phys(vaend));
}
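
/*
 * Page table entries are written by the CPU but walked by the System MMU
 * using physical addresses, so every update below is pushed out of the CPU
 * data cache and the outer cache with pgtable_flush() before the hardware
 * can be expected to observe it.
 */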
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->pgtable = (unsigned long *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 2);
        if (!priv->pgtable)
                goto err_pgtable;

        priv->lv2entcnt = (short *)__get_free_pages(
                                                GFP_KERNEL | __GFP_ZERO, 1);
        if (!priv->lv2entcnt)
                goto err_counter;

        pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->pgtablelock);
        INIT_LIST_HEAD(&priv->clients);

        domain->geometry.aperture_start = 0;
        domain->geometry.aperture_end = ~0UL;
        domain->geometry.force_aperture = true;

        domain->priv = priv;
        return 0;

err_counter:
        free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
        kfree(priv);
        return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) {
                while (!exynos_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kfree(__va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 2);
        free_pages((unsigned long)priv->lv2entcnt, 1);
        kfree(domain->priv);
        domain->priv = NULL;
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
                                struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long flags;
        int ret;

        ret = pm_runtime_get_sync(data->sysmmu);
        if (ret < 0)
                return ret;

        ret = 0;

        spin_lock_irqsave(&priv->lock, flags);

        ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);

        if (ret == 0) {
                /* 'data->node' must not already appear in priv->clients */
                BUG_ON(!list_empty(&data->node));
                data->dev = dev;
                list_add_tail(&data->node, &priv->clients);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret < 0) {
                dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
                                __func__, __pa(priv->pgtable));
                pm_runtime_put(data->sysmmu);
        } else if (ret > 0) {
                dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
                                __func__, __pa(priv->pgtable));
        } else {
                dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
                                __func__, __pa(priv->pgtable));
        }

        return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
                                struct device *dev)
{
        struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
        struct exynos_iommu_domain *priv = domain->priv;
        struct list_head *pos;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each(pos, &priv->clients) {
                if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
                        found = true;
                        break;
                }
        }

        if (!found)
                goto finish;

        if (__exynos_sysmmu_disable(data)) {
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
                                __func__, __pa(priv->pgtable));
                list_del_init(&data->node);

        } else {
                dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
                                __func__, __pa(priv->pgtable));
        }

finish:
        spin_unlock_irqrestore(&priv->lock, flags);

        if (found)
                pm_runtime_put(data->sysmmu);
}
static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
                                        short *pgcounter)
{
        if (lv1ent_fault(sent)) {
                unsigned long *pent;

                pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
                BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
                if (!pent)
                        return NULL;

                *sent = mk_lv1ent_page(__pa(pent));
                *pgcounter = NUM_LV2ENTRIES;
                pgtable_flush(pent, pent + NUM_LV2ENTRIES);
                pgtable_flush(sent, sent + 1);
        }

        return page_entry(sent, iova);
}

static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
        if (lv1ent_section(sent))
                return -EADDRINUSE;

        if (lv1ent_page(sent)) {
                if (*pgcnt != NUM_LV2ENTRIES)
                        return -EADDRINUSE;

                kfree(page_entry(sent, 0));

                *pgcnt = 0;
        }

        *sent = mk_lv1ent_sect(paddr);

        pgtable_flush(sent, sent + 1);

        return 0;
}

static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                                                                short *pgcnt)
{
        if (size == SPAGE_SIZE) {
                if (!lv2ent_fault(pent))
                        return -EADDRINUSE;

                *pent = mk_lv2ent_spage(paddr);
                pgtable_flush(pent, pent + 1);
                *pgcnt -= 1;
        } else { /* size == LPAGE_SIZE */
                int i;
                for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                        if (!lv2ent_fault(pent)) {
                                memset(pent, 0, sizeof(*pent) * i);
                                return -EADDRINUSE;
                        }

                        *pent = mk_lv2ent_lpage(paddr);
                }
                pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
                *pgcnt -= SPAGES_PER_LPAGE;
        }

        return 0;
}
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        int ret = -ENOMEM;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (size == SECT_SIZE) {
                ret = lv1set_section(entry, paddr,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                unsigned long *pent;

                pent = alloc_lv2entry(entry, iova,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);

                if (!pent)
                        ret = -ENOMEM;
                else
                        ret = lv2set_page(pent, paddr, size,
                                        &priv->lv2entcnt[lv1ent_offset(iova)]);
        }

        if (ret) {
                pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
                                                __func__, iova, size);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return ret;
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
                                        unsigned long iova, size_t size)
{
        struct exynos_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        unsigned long *ent;

        BUG_ON(priv->pgtable == NULL);

        spin_lock_irqsave(&priv->pgtablelock, flags);

        ent = section_entry(priv->pgtable, iova);

        if (lv1ent_section(ent)) {
                BUG_ON(size < SECT_SIZE);

                *ent = 0;
                pgtable_flush(ent, ent + 1);
                size = SECT_SIZE;
                goto done;
        }

        if (unlikely(lv1ent_fault(ent))) {
                if (size > SECT_SIZE)
                        size = SECT_SIZE;
                goto done;
        }

        /* lv1ent_page(ent) == true here */

        ent = page_entry(ent, iova);

        if (unlikely(lv2ent_fault(ent))) {
                size = SPAGE_SIZE;
                goto done;
        }

        if (lv2ent_small(ent)) {
                *ent = 0;
                size = SPAGE_SIZE;
                priv->lv2entcnt[lv1ent_offset(iova)] += 1;
                goto done;
        }

        /* lv2ent_large(ent) == true here */
        BUG_ON(size < LPAGE_SIZE);

        memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);

        size = LPAGE_SIZE;
        priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        spin_lock_irqsave(&priv->lock, flags);
        list_for_each_entry(data, &priv->clients, node)
                sysmmu_tlb_invalidate_entry(data->dev, iova);
        spin_unlock_irqrestore(&priv->lock, flags);

        return size;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
                                        dma_addr_t iova)
{
        struct exynos_iommu_domain *priv = domain->priv;
        unsigned long *entry;
        unsigned long flags;
        phys_addr_t phys = 0;

        spin_lock_irqsave(&priv->pgtablelock, flags);

        entry = section_entry(priv->pgtable, iova);

        if (lv1ent_section(entry)) {
                phys = section_phys(entry) + section_offs(iova);
        } else if (lv1ent_page(entry)) {
                entry = page_entry(entry, iova);

                if (lv2ent_large(entry))
                        phys = lpage_phys(entry) + lpage_offs(iova);
                else if (lv2ent_small(entry))
                        phys = spage_phys(entry) + spage_offs(iova);
        }

        spin_unlock_irqrestore(&priv->pgtablelock, flags);

        return phys;
}

static struct iommu_ops exynos_iommu_ops = {
        .domain_init = &exynos_iommu_domain_init,
        .domain_destroy = &exynos_iommu_domain_destroy,
        .attach_dev = &exynos_iommu_attach_device,
        .detach_dev = &exynos_iommu_detach_device,
        .map = &exynos_iommu_map,
        .unmap = &exynos_iommu_unmap,
        .iova_to_phys = &exynos_iommu_iova_to_phys,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
static int __init exynos_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&exynos_sysmmu_driver);

        if (ret == 0)
                bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);

        return ret;
}
subsys_initcall(exynos_iommu_init);
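
/*
 * Usage sketch (illustrative only; "dev", "iova", "phys" and the error path
 * are made up): once exynos_iommu_ops is registered on the platform bus, a
 * client goes through the generic IOMMU API rather than calling this file
 * directly.
 *
 *      struct iommu_domain *dom;
 *
 *      dom = iommu_domain_alloc(&platform_bus_type);
 *      if (!dom)
 *              return -ENOMEM;
 *
 *      if (iommu_attach_device(dom, dev))
 *              goto err;
 *
 *      iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *      ...
 *      iommu_unmap(dom, iova, SZ_4K);
 *      iommu_detach_device(dom, dev);
 * err:
 *      iommu_domain_free(dom);
 */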