arch/arm/plat-omap/iommu.c
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include "iopgtable.h"
/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among
 * the OMAP series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);
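
/*
 * Illustrative sketch (not part of this file): an architecture backend,
 * e.g. for omap2/3, fills in a struct iommu_functions with the callbacks
 * dereferenced through arch_iommu below and registers it at init time.
 * All names prefixed "my_" are hypothetical:
 *
 *	static const struct iommu_functions my_iommu_ops = {
 *		.version	= 1,
 *		.enable		= my_iommu_enable,
 *		.disable	= my_iommu_disable,
 *		.fault_isr	= my_iommu_fault_isr,
 *		...
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return install_iommu_arch(&my_iommu_ops);
 *	}
 */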
/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);
static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}
/*
 *	TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);

	BUG_ON(l->base != 0); /* Currently no preservation is used */
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	BUG_ON(l->base != 0); /* Currently no preservation is used */

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}
/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int i;
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs tmp;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &tmp);
		if (!iotlb_cr_valid(&tmp))
			break;
	}

	if (i == obj->nr_tlb_entries) {
		dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
		err = -EBUSY;
		goto out;
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = 0;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	struct iotlb_lock l;
	int i;

	clk_enable(obj->clk);

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs cr;
		u32 start;
		size_t bytes;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &cr);
		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);
/**
 * flush_iotlb_range - Clear iommu tlb entries in a range
 * @obj:	target iommu
 * @start:	iommu device virtual address (start)
 * @end:	iommu device virtual address (end)
 *
 * Clear the iommu tlb entries that fall within the ['start', 'end') range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);
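
/*
 * Illustrative sketch (not part of this file): after tearing down the
 * mappings of a buffer, a caller can invalidate the matching TLB entries;
 * 'obj', 'da' and 'bytes' are assumed to come from the caller:
 *
 *	flush_iotlb_range(obj, da, da + bytes);
 */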
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);
#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved, l;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &saved);
	memcpy(&l, &saved, sizeof(saved));

	for (i = 0; i < num; i++) {
		struct cr_regs tmp;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &tmp);
		if (!iotlb_cr_valid(&tmp))
			continue;

		*p++ = tmp;
	}
	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);
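
/*
 * Illustrative sketch (debug module side, hypothetical): dumping the
 * valid TLB entries as text into a page-sized buffer; 'obj' is assumed
 * to be held via iommu_get():
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *
 *	if (buf) {
 *		size_t bytes = dump_tlb_entries(obj, buf, PAGE_SIZE);
 *
 *		pr_info("%.*s", (int)bytes, buf);
 *		free_page((unsigned long)buf);
 *	}
 */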
int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);
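
/*
 * Illustrative sketch: iterating over all registered iommu devices with
 * a hypothetical callback; returning nonzero from the callback stops
 * the iteration:
 *
 *	static int count_iommu(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *	...
 *	int count = 0;
 *	foreach_iommu_device(&count, count_iommu);
 */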

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}
static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a page table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}
static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}
/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);
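
/*
 * Illustrative sketch (not part of this file): mapping one 4KB page at
 * device address 'da' backed by physical address 'pa'. Only the fields
 * this file reads (da, pa, pgsz) are shown; a real caller also sets the
 * attribute fields consumed by get_iopte_attr():
 *
 *	struct iotlb_entry e = {
 *		.da	= da,
 *		.pa	= pa,
 *		.pgsz	= MMU_CAM_PGSZ_4K,
 *	};
 *	int err = iopgtable_store_entry(obj, &e);
 */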
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (*iopgd & IOPGD_TABLE)
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
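
/*
 * Illustrative sketch: translating 'da' back to a physical address,
 * assuming it is covered by a small (4KB) page mapping, so the pte
 * holds the frame address under IOPAGE_MASK:
 *
 *	u32 *pgd, *pte, pa;
 *
 *	iopgtable_lookup_entry(obj, da, &pgd, &pte);
 *	if (pte && *pte)
 *		pa = (*pte & IOPAGE_MASK) | (da & ~IOPAGE_MASK);
 */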
static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (*iopgd & IOPGD_TABLE) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * walk the table to check whether it is still in use
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}
/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
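
/*
 * Illustrative sketch: unmapping a region of 'bytes' starting at 'da';
 * the return value reports how much address space each cleared entry
 * covered, so the caller can advance by mapped-entry granularity:
 *
 *	size_t total = 0;
 *
 *	while (total < bytes) {
 *		size_t freed = iopgtable_clear_entry(obj, da + total);
 *
 *		if (!freed)
 *			break;
 *		total += freed;
 *	}
 */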
static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (*iopgd & IOPGD_TABLE)
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}
/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	/* Let the client try to load the faulting TLB entry or PTE dynamically */
	if (obj->isr)
		err = obj->isr(obj);

	if (!err)
		return IRQ_HANDLED;

	clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
			da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}
/**
 * iommu_get - Get iommu handler
 * @name:	target iommu name
 **/
struct iommu *iommu_get(const char *name)
{
	int err = -ENOMEM;
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	mutex_lock(&obj->iommu_lock);

	if (obj->refcount++ == 0) {
		err = iommu_enable(obj);
		if (err)
			goto err_enable;
		flush_iotlb_all(obj);
	}

	if (!try_module_get(obj->owner))
		goto err_module;

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	mutex_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iommu_get);
/**
 * iommu_put - Put back iommu handler
 * @obj:	target iommu
 **/
void iommu_put(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	mutex_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
EXPORT_SYMBOL_GPL(iommu_put);
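
/*
 * Illustrative sketch: a client driver takes a reference on a named
 * iommu instance, uses it, then releases it ("isp" is a hypothetical
 * device name supplied via platform data):
 *
 *	struct iommu *obj = iommu_get("isp");
 *
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	iommu_put(obj);
 */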
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);
	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");