linux-2.6/next.git: arch/arm/plat-omap/iovmm.c

/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)	function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	_kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	_vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *	'iova':	device iommu virtual address
 *	'da':	alias of 'iova'
 *	'pa':	physical address
 *	'va':	mpu virtual address
 *
 *	'c':	contiguous memory area
 *	'd':	discontiguous memory area
 *	'a':	anonymous memory allocation
 *	'()':	optional feature
 *
 *	'n':	a normal page(4KB) size is used.
 *	's':	multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
 *
 *	'*':	not yet, but feasible.
 */

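/*
 * Usage sketch (not part of this file; 'obj' stands for an iommu handle the
 * driver obtained elsewhere): pattern 4 above allocates pages with vmalloc()
 * and maps them behind the iommu as one contiguous 'da' range.
 *
 *	u32 da;
 *
 *	da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		goto err;
 *	...
 *	iommu_vfree(obj, da);
 */
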
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

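/*
 * For example, max_alignment(0x40100000) skips SZ_16M (the address is not
 * 16MB aligned) and returns SZ_1M, while max_alignment(0x40001000) falls
 * through to SZ_4K.  An address that is not even 4KB aligned returns 0.
 */
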
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}

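/*
 * For example, 3MB starting at da = pa = 0x40100000 yields three entries:
 * max_alignment() caps each entry at SZ_1M and iopgsz_max() never exceeds
 * the remaining length, so nr_entries = 3.
 */
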
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg->length;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given device virtual addr.
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all; it only exists for
	 * consistency and readability of the code.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
				 size_t len)
{
	unsigned int i;
	struct scatterlist *sg;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		unsigned bytes;

		bytes = max_alignment(da | pa);
		bytes = min_t(unsigned, bytes, iopgsz_max(len));

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		da += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all; it only exists for
	 * consistency and readability of the code.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg->length;

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;	/* don't return 'err' uninitialized */
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
	       u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);

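/*
 * Usage sketch (hypothetical caller; 'obj' and 'sgt' are assumed to exist
 * and every 'sgt' segment must already be an iommu page size):
 *
 *	u32 da;
 *
 *	da = iommu_vmap(obj, 0, sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		goto err;
 *	...
 *	sgt = iommu_vunmap(obj, da);
 *
 * iommu_vunmap() hands 'sgt' back; freeing it remains the caller's job.
 */
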
/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated by the caller before 'iommu_vmap()' is called.
	 * Just return 'sgt' to the caller to free.
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

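/*
 * A fixed device address can also be requested (sketch; 'obj' and the
 * address are placeholders and must fall inside obj->da_start..da_end):
 *
 *	da = iommu_vmalloc(obj, 0x30000000, SZ_256K, IOVMF_DA_FIXED);
 */
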
/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags, da, pa);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, da, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @pa:		contiguous physical memory
 * @bytes:	size of the mapping
 * @flags:	iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
	       u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);

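/*
 * Usage sketch (hypothetical; 'pa' is the base of an already physically
 * contiguous region, e.g. a reserved framebuffer, and 'obj' an iommu handle):
 *
 *	u32 da;
 *
 *	da = iommu_kmap(obj, 0x20000000, pa, SZ_2M, IOVMF_DA_FIXED);
 *	if (IS_ERR_VALUE(da))
 *		goto err;
 *	...
 *	iommu_kunmap(obj, da);
 */
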
/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

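/*
 * Since the backing memory comes from kmalloc(GFP_KERNEL | GFP_DMA), this
 * path suits small, physically contiguous buffers; e.g. (hypothetical,
 * 'obj' assumed valid):
 *
 *	u32 da = iommu_kmalloc(obj, 0, SZ_16K, 0);
 */
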
/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");