/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

/*
 * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
 *
 * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
 */
#define IOVMF_SW_SHIFT		16

/*
 * iovma: h/w flags derived from cam and ram attribute
 */
#define IOVMF_CAM_MASK		(~((1 << 10) - 1))
#define IOVMF_RAM_MASK		(~IOVMF_CAM_MASK)

#define IOVMF_PGSZ_MASK		(3 << 0)
#define IOVMF_PGSZ_1M		MMU_CAM_PGSZ_1M
#define IOVMF_PGSZ_64K		MMU_CAM_PGSZ_64K
#define IOVMF_PGSZ_4K		MMU_CAM_PGSZ_4K
#define IOVMF_PGSZ_16M		MMU_CAM_PGSZ_16M

#define IOVMF_ENDIAN_MASK	(1 << 9)
#define IOVMF_ENDIAN_BIG	MMU_RAM_ENDIAN_BIG

#define IOVMF_ELSZ_MASK		(3 << 7)
#define IOVMF_ELSZ_16		MMU_RAM_ELSZ_16
#define IOVMF_ELSZ_32		MMU_RAM_ELSZ_32
#define IOVMF_ELSZ_NONE		MMU_RAM_ELSZ_NONE

#define IOVMF_MIXED_MASK	(1 << 6)
#define IOVMF_MIXED		MMU_RAM_MIXED

/*
 * iovma: s/w flags, used internally for mapping and unmapping.
 */
#define IOVMF_MMIO		(1 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC		(2 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC_MASK	(3 << IOVMF_SW_SHIFT)

/* "superpages" are supported just with physically linear pages */
#define IOVMF_DISCONT		(1 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR		(2 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR_MASK	(3 << (2 + IOVMF_SW_SHIFT))

#define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))

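/*
 * Illustrative note (not from the original source): the s/w half of the
 * flags word is what callers normally deal with.  For example, a caller of
 * omap_iommu_vmalloc() that wants to keep the exact device address it
 * passed in would set
 *
 *	u32 flags = IOVMF_DA_FIXED;
 *
 * while passing 0 lets alloc_iovm_area() pick a suitable address.  The
 * IOVMF_ALLOC and IOVMF_DISCONT bits are OR'ed in internally by
 * omap_iommu_vmalloc() itself, so callers do not need to set them.
 */
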
static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

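/*
 * Illustrative note: sgtable_len() counts each entry's length + offset as
 * the mapped size, so a table with 64K, 64K and 4K entries passes (132K in
 * total), while an 8K entry fails iopgsz_ok() and the whole table is
 * rejected with a return value of 0.
 */
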
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;

	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

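/*
 * Worked example for max_alignment() (illustrative): 0x00140000 is neither
 * 16M- nor 1M-aligned but is 64K-aligned, so SZ_64K is returned; an address
 * that is not even 4K-aligned yields 0.
 */
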
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));

		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}

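/*
 * Worked example (illustrative): for a 2M region whose 'da' and 'pa' are
 * both 16M-aligned, max_alignment() allows 16M but iopgsz_max(2M) caps each
 * entry at SZ_1M, so two entries are needed; the same 2M with only 4K
 * alignment needs 512 entries of SZ_4K.
 */
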
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end,
				len, tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

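/*
 * Usage sketch (illustrative, not part of this file): a client driver
 * attached to this iommu can look up the iovma covering a device address
 * returned by omap_iommu_vmap()/omap_iommu_vmalloc():
 *
 *	struct iovm_struct *area = omap_find_iovm_area(dev, da);
 *
 *	if (area)
 *		dev_dbg(dev, "da %08x is inside %08x-%08x\n",
 *			da, area->da_start, area->da_end);
 */
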
/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

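/*
 * Worked example (illustrative): with da_start = 0x10000, one existing
 * iovma covering 0x10000-0x30000 and a 0x20000-byte request without
 * IOVMF_DA_FIXED, the loop bumps 'start' to roundup(0x30001, PAGE_SIZE) =
 * 0x31000, the list ends, and (assuming da_end leaves enough room) the new
 * iovma is placed at 0x31000-0x51000.
 */
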
static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

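/*
 * Usage sketch (illustrative): a driver can check whether a device address
 * is currently backed by an MPU-side mapping:
 *
 *	void *va = omap_da_to_va(dev, da);
 *
 *	if (!va)
 *		dev_warn(dev, "no mapping at %08x\n", da);
 */
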
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err = -EINVAL;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	size_t bytes;
	void *va = NULL;
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);

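/*
 * Usage sketch (illustrative; 'domain', 'dev' and 'sgt' are assumed to be
 * provided by the client driver, which has already attached 'dev' to
 * 'domain'):
 *
 *	u32 da;
 *
 *	da = omap_iommu_vmap(domain, dev, 0, sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	omap_iommu_vunmap(domain, dev, da);
 *
 * omap_iommu_vunmap() hands the original 'sgt' back so the caller can free
 * it.
 */
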
/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);

	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
		   size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);

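/*
 * Usage sketch (illustrative): allocate a 1M buffer, let the allocator pick
 * the device address, and release it when done:
 *
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...
 *	omap_iommu_vfree(domain, dev, da);
 *
 * omap_iommu_vfree() tears down the mapping, vfrees the MPU-side buffer and
 * frees the sg_table that was allocated internally.
 */
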
/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");