drivers/iommu/omap-iovmm.c
/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length;

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;
        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
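
/*
 * A few worked examples of max_alignment(), assuming the OMAP page sizes
 * listed above (illustrative only): it returns the largest supported iommu
 * page size that @addr is aligned to, or 0 if none fits.
 *
 *      max_alignment(0x41000000) == SZ_16M
 *      max_alignment(0x40200000) == SZ_1M
 *      max_alignment(0x40201000) == SZ_4K
 *      max_alignment(0x40200800) == 0        (not even 4KiB aligned)
 */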

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;
                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}
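
/*
 * Worked example (illustrative only): for a linear region of
 * SZ_1M + SZ_64K bytes with @da and @pa both 1MiB aligned,
 * sgtable_nents() picks one SZ_1M entry followed by one SZ_64K entry
 * and returns 2, instead of the 272 entries a plain PAGE_SIZE split
 * would need.
 */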

/* allocate and initialize sg_table header(a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg->length;

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                                (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
                                                        const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
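
/*
 * Typical use (a minimal sketch; 'obj' is assumed to come from the platform
 * iommu setup code and 'da' from an earlier omap_iommu_vmap() or
 * omap_iommu_vmalloc() call):
 *
 *      struct iovm_struct *area = omap_find_iovm_area(obj, da);
 *      if (area)
 *              pr_info("da range %08x-%08x\n", area->da_start, area->da_end);
 */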

/*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                                        obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
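
/*
 * Illustration of the first-fit walk above (assuming 4KiB PAGE_SIZE,
 * obj->da_start == 0, and neither IOVMF_DA_FIXED nor IOVMF_LINEAR set):
 * with existing iovmas [0x10000-0x50000) and [0x90000-0xb0000), a request
 * for 0x40000 bytes finds the 0x3f000-byte gap between them too small,
 * so the new area lands at 0xb1000, just past the last mapping (provided
 * obj->da_end leaves enough room).
 */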

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 * @va:         mpu virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
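
/*
 * A minimal lookup sketch (assuming 'obj', a 'da' returned by
 * omap_iommu_vmalloc(), and a hypothetical 'bytes' holding that
 * allocation's size; the returned va is the vmalloc'ed MPU-side buffer):
 *
 *      void *va = omap_da_to_va(obj, da);
 *      if (va)
 *              memset(va, 0, bytes);
 */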

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability.
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;
        int order;

        if (!domain || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                size_t bytes;

                pa = sg_phys(sg);
                bytes = sg->length;

                flags &= ~IOVMF_PGSZ_MASK;

                if (bytes_to_iopgsz(bytes) < 0)
                        goto err_out;

                order = get_order(bytes);

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                err = iommu_map(domain, da, pa, order, flags);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = sg->length;
                order = get_order(bytes);

                /* ignore failures.. we're already handling one */
                iommu_unmap(domain, da, order);

                da += bytes;
        }
        return err;
}
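
/*
 * For example (illustrative only): a single 1MiB sg entry whose da and pa
 * are both 1MiB aligned is pushed down as one
 * iommu_map(domain, da, pa, get_order(SZ_1M), flags) call, i.e. one section
 * entry, rather than 256 separate 4KiB mappings; get_order(SZ_1M) is 8 with
 * 4KiB pages.
 */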

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
                        struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
        int i, err;

        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                int order;

                bytes = sg->length;
                order = get_order(bytes);

                err = iommu_unmap(domain, start, order);
                if (err)
                        break;

                dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                        __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
                                        struct omap_iommu *obj, const u32 da,
                                        void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(domain, obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
                                u32 da, const struct sg_table *sgt, void *va,
                                size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(domain, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
                u32 da, const struct sg_table *sgt,
                void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:        objective iommu
 * @sgt:        address of scatter gather table
 * @flags:      iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
                const struct sg_table *sgt, u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
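
/*
 * A minimal usage sketch (assuming 'domain' and 'obj' were set up by the
 * platform glue, and each 'sgt' entry is exactly PAGE_SIZE bytes, which the
 * IOVMF_MMIO path requires; da == 0 lets the allocator pick the device
 * address since IOVMF_DA_FIXED is not set):
 *
 *      u32 da = omap_iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
 *      if (IS_ERR_VALUE(da))
 *              return da;
 *      ...
 *      sgt = omap_iommu_vunmap(domain, obj, da);
 */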

/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free
         */
        sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                                        IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      allocation size
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
                                                size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
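
/*
 * A minimal usage sketch (assuming 'domain' and 'obj' come from the
 * platform iommu setup; the backing pages are vmalloc'ed by the call
 * itself, and IOVMF_DISCONT | IOVMF_ALLOC are forced internally):
 *
 *      u32 da = omap_iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
 *      if (IS_ERR_VALUE(da))
 *              return da;
 *      ...
 *      omap_iommu_vfree(domain, obj, da);
 */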

/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
                                                                const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, vfree,
                                                IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");