/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
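
/*
 * Worked example (illustrative, not part of the original driver):
 * max_alignment() returns the largest iommu page size that @addr is
 * aligned to, which sgtable_nents() below uses to cover a region with
 * as few 'superpage' entries as possible:
 *
 *	max_alignment(0x00100000) == SZ_1M
 *	max_alignment(0x00010000) == SZ_64K
 *	max_alignment(0x00001000) == SZ_4K
 *	max_alignment(0x00000001) == 0		(not io page aligned)
 */
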
/*
 * calculate the optimal number of sg elements from total bytes based on
 * both whole @bytes and the iommu page sizes aligned with @da and @pa
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));

		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length;
		u32 pa = sg_phys(sg);
		int err;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
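
/*
 * Example usage (illustrative sketch, not part of the original driver;
 * 'obj' and 'da' are assumed to come from the caller):
 *
 *	struct iovm_struct *area;
 *
 *	area = omap_find_iovm_area(obj, da);
 *	if (area)
 *		dev_dbg(obj->dev, "da %08x lies in iovma %08x-%08x\n",
 *			da, area->da_start, area->da_end);
 */
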
/*
 * This finds the hole (area) which fits the requested address and length
 * in the iovma mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns the mpu virtual address which corresponds to a given
 * iommu device virtual address
 */
void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
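
/*
 * Example usage (illustrative sketch; 'obj' is an omap_iommu handle owned
 * by the caller and 'da' a device virtual address mapped earlier, e.g. by
 * omap_iommu_vmalloc()):
 *
 *	void *va = omap_da_to_va(obj, da);
 *
 *	if (!va)
 *		dev_err(obj->dev, "no mapping at %08x\n", da);
 */
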
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err = -EINVAL;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;
	int order;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg);
		bytes = sg->length;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		order = get_order(bytes);

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, order, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length;
		order = get_order(bytes);

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, order);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	unsigned int i;
	int err;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		int order;

		bytes = sg->length;
		order = get_order(bytes);

		err = iommu_unmap(domain, start, order);
		if (err < 0)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32
map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
		 u32 da, const struct sg_table *sgt, void *va,
		 size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32
omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		const struct sg_table *sgt, u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
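
/*
 * Example usage (illustrative sketch, not part of the original driver):
 * map a caller-built scatterlist of page-sized entries at an iommu-chosen
 * device address.  'domain', 'obj', 'pages' and 'nr_pages' are assumed to
 * be set up by the caller, and the flags value is a simplified assumption.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *	u32 da;
 *
 *	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *	if (!sgt || sg_alloc_table(sgt, nr_pages, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 *	da = omap_iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 */
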
/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' was allocated by the caller before 'omap_iommu_vmap()' was
	 * called.  Just return 'sgt' to the caller to free.
	 */
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
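
/*
 * Example usage (illustrative sketch): tear down a mapping created with
 * omap_iommu_vmap().  The sg_table was allocated by the caller, so the
 * caller frees it once it is handed back:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = omap_iommu_vunmap(domain, obj, da);
 *	if (sgt) {
 *		sg_free_table(sgt);
 *		kfree(sgt);
 *	}
 */
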
/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		   size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
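
/*
 * Example usage (illustrative sketch; the SZ_1M size, the zero 'da' hint
 * and the zero extra flags are arbitrary assumptions): let the driver
 * vmalloc() a buffer and map it at an iommu-chosen device address, then
 * release both with omap_iommu_vfree().
 *
 *	u32 da;
 *
 *	da = omap_iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *
 *	(hand 'da' to the device here)
 *
 *	omap_iommu_vfree(domain, obj, da);
 */
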
/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
		      const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");