/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <mach/iommu.h>
#include <mach/iovmm.h>

#include "iopgtable.h"
/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)	function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	_kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	_vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *
 *	'iova':	device iommu virtual address
 *	'da':	alias of 'iova'
 *	'pa':	physical address
 *	'va':	mpu virtual address
 *
 *	'c':	contiguous memory area
 *	'd':	discontiguous memory area
 *	'a':	anonymous memory allocation
 *	'()':	optional feature
 *
 *	'n':	a normal page(4KB) size is used.
 *	's':	multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
 *
 *	'*':	not yet, but feasible.
 */
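
/*
 * In other words, every mapping created below ends up with a (d)-(p)-(v)
 * triple: 'da' is handed to the device, 'va' is used from the MPU side,
 * and the two can be cross-referenced at any time, e.g. (illustration
 * only; 'obj' is the struct iommu handle the driver already holds):
 *
 *	void *va = da_to_va(obj, da);
 */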
static struct kmem_cache *iovm_area_cachep;
/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
/*
 * calculate the optimal number sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries = 0;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}
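
/*
 * For example, a request of 17MB + 64KB decomposes greedily into
 * 1 x SZ_16M + 1 x SZ_1M + 1 x SZ_64K, so sgtable_nents() returns 3
 * instead of the 4368 entries a pure 4KB layout would need.
 */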
/* allocate and initialize sg_table header(a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}
/**
 * find_iovm_area  -  find iovma which includes @da
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);
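
/*
 * Example (for illustration only): a caller that only remembers a device
 * address can look its iovma back up, e.g. to recover the mapping length:
 *
 *	struct iovm_struct *area = find_iovm_area(obj, da);
 *	size_t bytes = area ? area->da_end - area->da_start : 0;
 */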
/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if ((prev_end <= start) && (start + bytes < tmp->da_start))
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
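
/*
 * Worked example of the policy above: with IOVMF_DA_ANON | IOVMF_LINEAR and
 * bytes = SZ_1M, 'alignment' becomes iopgsz_max(SZ_1M) == SZ_1M, so the
 * first anonymous area is placed at da 0x00100000 rather than at PAGE_SIZE,
 * keeping it eligible for a 1MB superpage entry in the iommu page table.
 */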
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}
/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);

		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
	flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va;
	const size_t total = len;	/* keep the length for cache maintenance */

	va = phys_to_virt(pa);

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous(linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);

	clean_dcache_area(va, total);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability
	 */
	BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !new || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0)
			goto err_out;
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		start += bytes;
		total -= bytes;
	}
	BUG_ON(total);
}
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}
static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}
/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt element must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
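
/*
 * Example (for illustration only): a driver that already owns an sg_table
 * whose elements are all iommu page sizes can wire it into the device
 * address space and later tear the mapping down; passing da == 0 lets the
 * allocator pick the device address, and iommu_vunmap() hands the same
 * sg_table back for the caller to free:
 *
 *	u32 da = iommu_vmap(obj, 0, sgt, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	sgt = iommu_vunmap(obj, da);
 */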
/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
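
/*
 * Example (for illustration only): allocate a physically discontiguous 4MB
 * buffer that the device sees as one contiguous range, then release it:
 *
 *	u32 da = iommu_vmalloc(obj, 0, SZ_4M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_vfree(obj, da);
 */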
/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);
static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			  size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}
/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @pa:		contiguous physical memory
 * @flags:	iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
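
/*
 * Example (for illustration only): expose an already contiguous physical
 * region, e.g. a framebuffer at the hypothetical address 'fb_pa', to the
 * device at a fixed device address:
 *
 *	u32 da = iommu_kmap(obj, 0x20000000, fb_pa, SZ_2M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_kunmap(obj, da);
 */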
/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocate @bytes linearly and creates 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
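
/*
 * Example (for illustration only): grab a small physically contiguous
 * buffer that both the MPU and the device can address; superpage entries
 * are used automatically when the size allows it:
 *
 *	u32 da = iommu_kmalloc(obj, 0, SZ_64K, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_kfree(obj, da);
 */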
/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);
static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);
MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");