/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
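/*
 * The comma operator makes the loop condition both bounds-checked and
 * side-effecting: each iteration first reads TLB entry __i into cr via
 * __iotlb_read_cr() and then evaluates to true, so the loop only stops
 * on the __i < n bound. Callers inspect cr (e.g. with iotlb_cr_valid())
 * in the loop body.
 */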
/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	spinlock_t lock;
};
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among
 * the omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void omap_iommu_save_ctx(struct omap_iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void omap_iommu_restore_ctx(struct omap_iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}
static void iommu_disable(struct omap_iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}
/*
 *	TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}
static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}
static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
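/*
 * MMU_LOCK partitions the TLB as this driver uses it: entries below
 * 'base' are treated as preserved and are never picked as replacement
 * victims, while 'vict' selects the entry that the next TLB load or
 * read targets. __iotlb_read_cr() below relies on this to address
 * individual TLB entries by index.
 */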
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}
/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
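/*
 * When PREFETCH_IOTLB is defined, a newly created mapping is preloaded
 * into the TLB so the device's first access avoids a hardware table
 * walk; otherwise load_iotlb_entry() is a stub and TLB entries are only
 * loaded on demand.
 */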
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}
/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
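/*
 * The "mcr p15, 0, %0, c7, c10, 1" above cleans one D-cache line by MVA,
 * pushing page-table writes out to memory where the IOMMU's hardware
 * table walker can see them; the loop steps through the range one L1
 * cache line at a time.
 */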
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
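/*
 * Note the race handling above: the L2 table is allocated with the page
 * table lock dropped (kmem_cache_zalloc may sleep), then *iopgd is
 * re-checked under the lock; if another CPU installed a table in the
 * meantime, the freshly allocated one is simply freed again.
 */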
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}
static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}
static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}
static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
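/*
 * Note the ordering above: any stale TLB entry covering e->da is flushed
 * before the page table is updated, and the new entry is only pushed
 * into the TLB once the PTE write (and its cache flush) has completed.
 */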
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		/* pointer arithmetic already scales by entry size */
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do a table walk to check whether this L2 table is
		 * still needed
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}
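/*
 * The byte count returned above reflects the full size of the entry that
 * was torn down (4K/64K for second-level entries, 1M/16M for first-level
 * ones), which lets iopgtable_clear_entry() report how much was unmapped.
 */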
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE dynamic loading */
	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * omap_find_iommu_device() - find an omap iommu device by name
 * @name:	name of the iommu device
 *
 * The generic iommu API requires the caller to provide the device
 * it wishes to attach to a certain iommu domain.
 *
 * Drivers generally should not bother with this as it should just
 * be taken care of by the DMA-API using dev_archdata.
 *
 * This function is provided as an interim solution until the latter
 * materializes, and omap3isp is fully migrated to the DMA-API.
 */
struct device *omap_find_iommu_device(const char *name)
{
	return driver_find_device(&omap_iommu_driver.driver, NULL,
				  (void *)name,
				  device_match_by_alias);
}
EXPORT_SYMBOL_GPL(omap_find_iommu_device);
/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @dev:	target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{
	int err = -ENOMEM;
	struct omap_iommu *obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
int omap_iommu_set_isr(const char *name,
		       int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
				  void *priv),
		       void *isr_priv)
{
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return -ENODEV;

	obj = to_iommu(dev);
	spin_lock(&obj->iommu_lock);
	if (obj->refcount != 0) {
		spin_unlock(&obj->iommu_lock);
		return -EBUSY;
	}
	obj->isr = isr;
	obj->isr_priv = isr_priv;
	spin_unlock(&obj->iommu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_set_isr);
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
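/*
 * Each OMAP IOMMU instance is a platform device with exactly two
 * resources: the MMU register window (IORESOURCE_MEM) and its fault
 * interrupt. The register save area for off-mode support is allocated
 * right behind struct omap_iommu (obj->ctx points sizeof(*obj) past
 * the start of the kzalloc'd block).
 */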
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 flags;
	int ret;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret) {
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
		return ret;
	}

	return 0;
}
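/*
 * The generic IOMMU API of this era passes sizes as a page order
 * (PAGE_SIZE << order), so only requests that land exactly on an OMAP
 * page size (4K, 64K, 1M, 16M) succeed here; bytes_to_iopgsz() rejects
 * everything else.
 */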
static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	size_t ret;

	dev_dbg(dev, "unmapping da 0x%lx size 0x%zx\n", da, bytes);

	ret = iopgtable_clear_entry(oiommu, da);
	if (ret != bytes) {
		dev_err(dev, "entry @ 0x%lx was %zu; not %zu\n", da, ret, bytes);
		return -EINVAL;
	}

	return 0;
}
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = to_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}
/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x\n", *pte);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x\n", *pgd);
	}

	return ret;
}
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};
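/*
 * A sketch of typical client usage through the generic IOMMU API of
 * this era (the "isp" name is illustrative, and error handling is
 * elided; a real client would come from platform code or omap3isp):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc();
 *	struct device *dev = omap_find_iommu_device("isp");
 *
 *	iommu_attach_device(domain, dev);
 *	// order 0 == one 4K page; pa must be 4K-aligned
 *	iommu_map(domain, da, pa, 0, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, da, 0);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */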
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	register_iommu(&omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);
MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");