/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <mach/iommu.h>
#include <mach/iovmm.h>

#include "iopgtable.h"
/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/                        mapping          iommu_              page
 *    | da     pa     va     (d)-(p)-(v)             function            type
 *  ---------------------------------------------------------------------------
 *  1 | c      c      c      1 - 1 - 1     _kmap()    / _kunmap()         s
 *  2 | c      c,a    c      1 - 1 - 1     _kmalloc() / _kfree()          s
 *  3 | c      d      c      1 - n - 1     _vmap()    / _vunmap()         s
 *  4 | c      d,a    c      1 - n - 1     _vmalloc() / _vfree()          n*
 *
 *    'iova': device iommu virtual address
 *    'da':   alias of 'iova'
 *    'pa':   physical address
 *    'va':   mpu virtual address
 *
 *    'c':    contiguous memory area
 *    'd':    discontiguous memory area
 *    'a':    anonymous memory allocation
 *    '()':   optional feature
 *
 *    'n':    a normal page (4KB) size is used.
 *    's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *    '*':    not yet, but feasible.
 */
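/*
 * A minimal usage sketch (illustration only, not part of the original file).
 * Obtaining the 'struct iommu' handle is assumed to go through
 * iommu_get()/iommu_put() from the plat-omap iommu core, and "isp" is a
 * made-up device name:
 *
 *	struct iommu *obj = iommu_get("isp");
 *
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	(use one of the _vmap/_vmalloc/_kmap/_kmalloc pairs below)
 *	iommu_put(obj);
 *
 * Passing da == 0 to the mapping functions requests an anonymous device
 * address (IOVMF_DA_ANON); a non-zero da asks for that exact address
 * (IOVMF_DA_FIXED).
 */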
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%zx)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08zx\n", __func__, bytes);
		return 0;
	}

	nr_entries = 0;
	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}
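/*
 * Worked example (illustration only): for bytes = 17MB + 64KB, the greedy
 * loop above yields 1 x 16MB + 1 x 1MB + 1 x 64KB = 3 sg elements, instead
 * of the 4368 elements a plain 4KB split would need.
 */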
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area - find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);
/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if ((prev_end <= start) && (start + bytes < tmp->da_start))
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}
/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given device
 * virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
	flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
}
static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Not strictly necessary; kept only so the fill/drain pairs
	 * stay symmetric and the code reads consistently.
	 */
	BUG_ON(!sgt);
}
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;
	const size_t total = len;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);

	clean_dcache_area(va, total);
}
static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Not strictly necessary; kept only so the fill/drain pairs
	 * stay symmetric and the code reads consistently.
	 */
	BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !new || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}
/**
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	va = vmap_sg(sgt);
	if (IS_ERR(va))
		return PTR_ERR(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
/**
 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' was allocated by the caller before 'iommu_vmap()' was
	 * called. Just return 'sgt' to the caller to free.
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
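/*
 * Usage sketch (illustration only, not part of the original file): mapping
 * a caller-built scatterlist and tearing it down again. How the 'pages[]'
 * array and 'nr_pages' were obtained is left out, error handling is
 * trimmed, and 'obj' is assumed to come from iommu_get():
 *
 *	struct sg_table *sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *	struct scatterlist *sg;
 *	unsigned int i;
 *	u32 da;
 *
 *	sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 *	da = iommu_vmap(obj, 0, sgt, 0);
 *	if (!IS_ERR_VALUE(da)) {
 *		(device uses the buffer at 'da')
 *		WARN_ON(iommu_vunmap(obj, da) != sgt);
 *	}
 *	sg_free_table(sgt);
 *	kfree(sgt);
 */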
/**
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);
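/*
 * Usage sketch (illustration only, not part of the original file): letting
 * iovmm allocate the backing memory itself. The 1MB size is a made-up
 * value and 'obj' is assumed to come from iommu_get():
 *
 *	u32 da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	(device uses the buffer at 'da'; da_to_va(obj, da) gives the mpu va)
 *	iommu_vfree(obj, da);
 */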
static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}
/**
 * iommu_kmap - (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @pa:		contiguous physical memory
 * @bytes:	length of the mapping in bytes
 * @flags:	iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);

/**
 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
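/*
 * Usage sketch (illustration only, not part of the original file): exposing
 * an existing physically contiguous region, e.g. a framebuffer at 'fb_pa',
 * to the device. 'fb_pa' and the 1MB size are made-up values; da == 0 lets
 * the allocator pick a superpage-aligned anonymous device address:
 *
 *	u32 da = iommu_kmap(obj, 0, fb_pa, SZ_1M, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	(device accesses the region through 'da')
 *	iommu_kunmap(obj, da);
 */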
/**
 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);
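/*
 * Usage sketch (illustration only, not part of the original file): a small
 * physically contiguous, DMA-able buffer allocated and mapped in one call;
 * 'obj' is again assumed to come from iommu_get():
 *
 *	u32 da = iommu_kmalloc(obj, 0, SZ_4K, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	(device uses the buffer at 'da'; da_to_va(obj, da) gives the mpu va)
 *	iommu_kfree(obj, da);
 */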
static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");