/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"
/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/          mapping         iommu_              page
 *    | da    pa    va    (d)-(p)-(v)   function              type
 *  ---------------------------------------------------------------------------
 *  1 | c     c     c      1 - 1 - 1    _kmap() / _kunmap()     s
 *  2 | c     c,a   c      1 - 1 - 1    _kmalloc()/ _kfree()    s
 *  3 | c     d     c      1 - n - 1    _vmap() / _vunmap()     s
 *  4 | c     d,a   c      1 - n - 1    _vmalloc()/ _vfree()    n*
 *
 *    'iova': device iommu virtual address
 *    'da':   alias of 'iova'
 *    'pa':   physical address
 *    'va':   mpu virtual address
 *
 *    'c':    contiguous memory area
 *    'd':    discontiguous memory area
 *    'a':    anonymous memory allocation
 *    '()':   optional feature
 *
 *    'n':    a normal page(4KB) size is used.
 *    's':    multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
 *
 *    '*':    not yet, but feasible.
 */
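/*
 * A minimal usage sketch of pattern 2 above (contiguous da, anonymous
 * contiguous pa), assuming the caller already holds a valid 'struct iommu'
 * pointer 'obj' for the target device; the size and flags below are only
 * placeholders:
 *
 *	u32 da;
 *
 *	da = iommu_kmalloc(obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	iommu_kfree(obj, da);
 *
 * The other three patterns follow the same shape with the corresponding
 * _kmap()/_kunmap(), _vmap()/_vunmap() and _vmalloc()/_vfree() pairs.
 */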
static struct kmem_cache *iovm_area_cachep;
/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
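/*
 * A worked example of the loop above, assuming iopgsz_max() returns the
 * largest supported iommu page size that does not exceed its argument:
 * with da = 0, pa = 0x80100000 and bytes = 0x120000, the first chunk is
 * SZ_1M (both max_alignment(da | pa) and iopgsz_max(bytes) allow it), and
 * the remaining 0x20000 is split into two SZ_64K chunks, so
 * sgtable_nents() returns 3.
 */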
/* allocate and initialize sg_table header(a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
						u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}
static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}
/**
 * find_iovm_area  -  find iovma which includes @da
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);
/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
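/*
 * A hedged example of the search above: with obj->da_start = 0x10000, an
 * empty obj->mmap list and IOVMF_DA_FIXED clear, a request with the default
 * PAGE_SIZE alignment simply lands at 0x10000; with IOVMF_LINEAR set,
 * 'alignment' becomes iopgsz_max(bytes), so e.g. a 1MB request is placed on
 * a 1MB boundary and can later be mapped with a single superpage entry.
 */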
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}
/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 * @va:		mpu virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
								size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		unsigned bytes;

		bytes = max_alignment(da | pa);
		bytes = min_t(unsigned, bytes, iopgsz_max(len));

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is continuous(linear).
		 */
		pa += bytes;
		da += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability
	 */
	BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}
static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
		const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}
/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
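/*
 * A minimal iommu_vmap()/iommu_vunmap() sketch, assuming the caller has
 * already built 'sgt' so that every element length is an iommu page size
 * (as required above); 'obj' and the '0' da hint are placeholders, and a
 * flags value of 0 is enough for a bare mapping:
 *
 *	u32 da;
 *
 *	da = iommu_vmap(obj, 0, sgt, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	sgt = iommu_vunmap(obj, da);
 *
 * The same 'sgt' that was passed to iommu_vmap() is handed back here and
 * stays owned by the caller.
 */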
/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);
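/*
 * A minimal iommu_vmalloc()/iommu_vfree() sketch (placeholder 'obj' and
 * size); the returned 'da' may differ from the requested one unless
 * IOVMF_DA_FIXED is honoured, and da_to_va() gives the mpu-side view of
 * the buffer:
 *
 *	u32 da;
 *	void *va;
 *
 *	da = iommu_vmalloc(obj, 0, SZ_64K, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	va = da_to_va(obj, da);
 *	...
 *	iommu_vfree(obj, da);
 */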
static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags, da, pa);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, da, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}
/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @pa:		contiguous physical memory
 * @flags:	iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
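/*
 * A minimal iommu_kmap()/iommu_kunmap() sketch for an already physically
 * contiguous region (the physical address, size and da hint below are
 * placeholders); per the kernel-doc above, the returned da may be adjusted
 * unless IOVMF_DA_FIXED is set:
 *
 *	u32 da;
 *
 *	da = iommu_kmap(obj, 0x20000000, 0x90000000, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	iommu_kunmap(obj, da);
 */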
/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);
static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);
static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);
MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");