/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads;
 * without the hash and with 4 pools, all primary threads would map to the
 * same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

        return 0;
}
subsys_initcall(setup_iommu_pool_hash);
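
/*
 * Illustrative sketch (not part of the driver): how the precomputed hash is
 * consumed by iommu_range_alloc() below.  The concrete CPU numbers are only
 * an assumed example of a 4-way SMT layout.
 *
 *      pool_nr = per_cpu(iommu_pool_hash, cpu) & (tbl->nr_pools - 1);
 *
 * With a plain "cpu & (nr_pools - 1)" and 4 pools, CPUs 0, 4, 8, ... (the
 * primary threads on 4-way SMT) would all land in pool 0; hash_32() spreads
 * them across the pools instead.
 */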
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
        return setup_fault_attr(&fail_iommu, str);
}

__setup("fail_iommu=", setup_fail_iommu);
static bool should_fail_iommu(struct device *dev)
{
        return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}
static int __init fail_iommu_debugfs(void)
{
        struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
                                                       NULL, &fail_iommu);

        return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);
static ssize_t fail_iommu_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}
static ssize_t fail_iommu_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count)
{
        int i;

        if (count > 0 && sscanf(buf, "%d", &i) > 0)
                dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

        return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
                   fail_iommu_store);
static int fail_iommu_bus_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (device_create_file(dev, &dev_attr_fail_iommu))
                        pr_warn("Unable to create IOMMU fault injection sysfs "
                                "entries\n");
        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
                device_remove_file(dev, &dev_attr_fail_iommu);
        }

        return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
        .notifier_call = fail_iommu_bus_notify
};
static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
        bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
        bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

        return 0;
}
/*
 * Must execute after PCI and VIO subsystems have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
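
/*
 * Illustrative usage sketch for the fault injection hooks above (the paths
 * assume a standard debugfs mount and a hypothetical PCI device at
 * 0000:00:01.0; not part of this file's API):
 *
 *      echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 *      echo 10 > /sys/kernel/debug/fail_iommu/probability
 *      echo -1 > /sys/kernel/debug/fail_iommu/times
 */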
#else

static inline bool should_fail_iommu(struct device *dev)
{
        return false;
}
#endif
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;
        unsigned long flags;
        unsigned int pool_nr;
        struct iommu_pool *pool;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);

        /* This allocator was derived from x86_64's bit string search */

        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (should_fail_iommu(dev))
                return DMA_ERROR_CODE;

        /*
         * We don't need to disable preemption here because any CPU can
         * safely use any IOMMU pool.
         */
        pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

        if (largealloc)
                pool = &(tbl->large_pool);
        else
                pool = &(tbl->pools[pool_nr]);

        spin_lock_irqsave(&(pool->lock), flags);

again:
        if ((pass == 0) && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = pool->start;

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(tbl->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IOMMU_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
                             align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First try the pool from the start */
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else if (pass <= tbl->nr_pools) {
                        /* Now try scanning all the other pools */
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
                        pool = &tbl->pools[pool_nr];
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        pass++;
                        goto again;

                } else {
                        /* Give up */
                        spin_unlock_irqrestore(&(pool->lock), flags);
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                pool->hint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                pool->hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}
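
/*
 * Worked example for iommu_range_alloc() (a sketch; it_blocksize = 16 is an
 * assumed value).  A small allocation that ends at entry "end" bumps the
 * pool hint to the next block boundary:
 *
 *      hint = (end + 16 - 1) & ~(16 - 1);      e.g. end = 0x23 -> hint = 0x30
 *
 * while a large allocation (npages > 15) comes from the large pool and just
 * sets hint = end, since rounding up there would only add fragmentation.
 */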
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order,
                              struct dma_attrs *attrs)
{
        unsigned long entry;
        dma_addr_t ret = DMA_ERROR_CODE;
        int build_fail;

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE))
                return DMA_ERROR_CODE;

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        build_fail = ppc_md.tce_build(tbl, entry, npages,
                                      (unsigned long)page & IOMMU_PAGE_MASK,
                                      direction, attrs);

        /* ppc_md.tce_build() only returns non-zero for transient errors.
         * Clean up the table bitmap in this case and return
         * DMA_ERROR_CODE. For all other errors the functionality is
         * not altered.
         */
        if (unlikely(build_fail)) {
                __iommu_free(tbl, ret, npages);
                return DMA_ERROR_CODE;
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}
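
/*
 * Example of the entry -> DMA address arithmetic above (a sketch with
 * assumed numbers and 4K IOMMU pages): if the allocator returns relative
 * entry 0x10 and tbl->it_offset is 0x100, the absolute TCE index is 0x110
 * and the returned bus address is 0x110 << IOMMU_PAGE_SHIFT = 0x110000, to
 * which the caller then ORs back the sub-page offset of the original buffer.
 */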
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
                             unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }

                return false;
        }

        return true;
}
static struct iommu_pool *get_pool(struct iommu_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;

        /* The large pool is the last pool at the top of the table */
        if (entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr > tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }

        return p;
}
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long flags;
        struct iommu_pool *pool;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        pool = get_pool(tbl, free_entry);

        if (!iommu_free_check(tbl, dma_addr, npages))
                return;

        ppc_md.tce_free(tbl, entry, npages);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(tbl->it_map, free_entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);
}
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
                 unsigned long mask, enum dma_data_direction direction,
                 struct dma_attrs *attrs)
{
        dma_addr_t dma_next = 0, dma_addr;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i, build_fail = 0;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %lx npages %lu\n", tbl, vaddr,
                                         npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                build_fail = ppc_md.tce_build(tbl, entry, npages,
                                              vaddr & IOMMU_PAGE_MASK,
                                              direction, attrs);
                if(unlikely(build_fail))
                        goto failure;

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %ux\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IOMMU_PAGE_SIZE);
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        return DMA_ERROR_CODE;
}
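
/*
 * Virtual merging, by example (a sketch with assumed addresses, 4K IOMMU
 * pages and novmerge clear): if element 0 maps to bus address 0x10000 with
 * length 0x1000, dma_next becomes 0x11000; when element 1 then happens to be
 * allocated at 0x11000 and the combined length fits max_seg_size, the two
 * are merged into a single segment of length 0x2000 instead of starting a
 * new one.
 */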
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                    int nelems, enum dma_data_direction direction,
                    struct dma_attrs *attrs)
{
        struct scatterlist *sg;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length,
                                         IOMMU_PAGE_SIZE);
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);
}
static void iommu_table_clear(struct iommu_table *tbl)
{
        /*
         * In case of firmware-assisted dump, the system goes through a clean
         * reboot at the time of a crash. Hence it's safe to clear the TCE
         * entries if firmware-assisted dump is active.
         */
        if (!is_kdump_kernel() || is_fadump_active()) {
                /* Clear the table in case firmware left allocations in it */
                ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
                return;
        }

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long index, tceval, tcecount = 0;

                /* Reserve the existing mappings left by the first kernel. */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }

                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing ");
                        printk(KERN_WARNING "%d entries for the kdump boot\n",
                                KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#endif
}
/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;
        unsigned int i;
        struct iommu_pool *p;

        /* number of bytes needed for the bitmap */
        sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        /*
         * Reserve page 0 so it will not be used for any mappings.
         * This keeps buggy drivers that treat a DMA address of 0 as invalid
         * from crashing the machine or even losing data.
         */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);

        /* We only split the IOMMU table if we have 1GB or more of space */
        if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
                tbl->nr_pools = IOMMU_NR_POOLS;
        else
                tbl->nr_pools = 1;

        /* We reserve the top 1/4 of the table for large allocations */
        tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

        for (i = 0; i < tbl->nr_pools; i++) {
                p = &tbl->pools[i];
                spin_lock_init(&(p->lock));
                p->start = tbl->poolsize * i;
                p->hint = p->start;
                p->end = p->start + tbl->poolsize;
        }

        p = &tbl->large_pool;
        spin_lock_init(&(p->lock));
        p->start = tbl->poolsize * i;
        p->hint = p->start;
        p->end = tbl->it_size;

        iommu_table_clear(tbl);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
                                node_name);
                return;
        }

        /*
         * In case we have reserved the first bit, we should not emit
         * the warning below.
         */
        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

#ifdef CONFIG_IOMMU_API
        if (tbl->it_group) {
                iommu_group_put(tbl->it_group);
                BUG_ON(tbl->it_group);
        }
#endif

        /* verify that table contains no entries */
        if (!bitmap_empty(tbl->it_map, tbl->it_size))
                pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

        /* calculate bitmap size in bytes */
        bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                          struct page *page, unsigned long offset, size_t size,
                          unsigned long mask, enum dma_data_direction direction,
                          struct dma_attrs *attrs)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        void *vaddr;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        vaddr = page_address(page) + offset;
        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

        if (tbl) {
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, align,
                                         attrs);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit()) {
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %p npages %d\n", tbl, vaddr,
                                         npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}
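
/*
 * Alignment example for iommu_map_page() (a sketch assuming 64K kernel pages
 * and 4K IOMMU pages): mapping a page-aligned 64K buffer gives npages = 16
 * and align = PAGE_SHIFT - IOMMU_PAGE_SHIFT = 4, so iommu_alloc() places the
 * 16 TCEs on a 16-entry (64K) boundary and the whole kernel page stays
 * contiguous in bus space.
 */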
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
                      size_t size, enum dma_data_direction direction,
                      struct dma_attrs *attrs)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
                iommu_free(tbl, dma_handle, npages);
        }
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
                         size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}
*tbl
, size_t size
,
861 void *vaddr
, dma_addr_t dma_handle
)
864 unsigned int nio_pages
;
866 size
= PAGE_ALIGN(size
);
867 nio_pages
= size
>> IOMMU_PAGE_SHIFT
;
868 iommu_free(tbl
, dma_handle
, nio_pages
);
869 size
= PAGE_ALIGN(size
);
870 free_pages((unsigned long)vaddr
, get_order(size
));
#ifdef CONFIG_IOMMU_API

static void group_release(void *iommu_data)
{
        struct iommu_table *tbl = iommu_data;
        tbl->it_group = NULL;
}

void iommu_register_group(struct iommu_table *tbl,
                int pci_domain_number, unsigned long pe_num)
{
        struct iommu_group *grp;
        char *name;

        grp = iommu_group_alloc();
        if (IS_ERR(grp)) {
                pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
                                PTR_ERR(grp));
                return;
        }
        tbl->it_group = grp;
        iommu_group_set_iommudata(grp, tbl, group_release);
        name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
                        pci_domain_number, pe_num);
        if (!name)
                return;
        iommu_group_set_name(grp, name);
        kfree(name);
}
enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
        if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
                return DMA_BIDIRECTIONAL;
        else if (tce & TCE_PCI_READ)
                return DMA_TO_DEVICE;
        else if (tce & TCE_PCI_WRITE)
                return DMA_FROM_DEVICE;
        else
                return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);
void iommu_flush_tce(struct iommu_table *tbl)
{
        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        /* Make sure updates are seen by hardware */
        mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
int iommu_tce_clear_param_check(struct iommu_table *tbl,
                unsigned long ioba, unsigned long tce_value,
                unsigned long npages)
{
        /* ppc_md.tce_free() does not support any value but 0 */
        if (tce_value)
                return -EINVAL;

        if (ioba & ~IOMMU_PAGE_MASK)
                return -EINVAL;

        ioba >>= IOMMU_PAGE_SHIFT;
        if (ioba < tbl->it_offset)
                return -EINVAL;

        if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
int iommu_tce_put_param_check(struct iommu_table *tbl,
                unsigned long ioba, unsigned long tce)
{
        if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
                return -EINVAL;

        if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
                return -EINVAL;

        if (ioba & ~IOMMU_PAGE_MASK)
                return -EINVAL;

        ioba >>= IOMMU_PAGE_SHIFT;
        if (ioba < tbl->it_offset)
                return -EINVAL;

        if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long oldtce;
        struct iommu_pool *pool = get_pool(tbl, entry);

        spin_lock(&(pool->lock));

        oldtce = ppc_md.tce_get(tbl, entry);
        if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
                ppc_md.tce_free(tbl, entry, 1);
        else
                oldtce = 0;

        spin_unlock(&(pool->lock));

        return oldtce;
}
EXPORT_SYMBOL_GPL(iommu_clear_tce);
int iommu_clear_tces_and_put_pages(struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        unsigned long oldtce;
        struct page *page;

        for ( ; pages; --pages, ++entry) {
                oldtce = iommu_clear_tce(tbl, entry);
                if (!oldtce)
                        continue;

                page = pfn_to_page(oldtce >> PAGE_SHIFT);
                WARN_ON(!page);
                if (page) {
                        if (oldtce & TCE_PCI_WRITE)
                                SetPageDirty(page);
                        put_page(page);
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_clear_tces_and_put_pages);
/*
 * hwaddr is a kernel virtual address here (0xc... bazillion),
 * tce_build converts it to a physical address.
 */
int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
                unsigned long hwaddr, enum dma_data_direction direction)
{
        int ret = -EBUSY;
        unsigned long oldtce;
        struct iommu_pool *pool = get_pool(tbl, entry);

        spin_lock(&(pool->lock));

        oldtce = ppc_md.tce_get(tbl, entry);
        /* Add new entry if it is not busy */
        if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
                ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);

        spin_unlock(&(pool->lock));

        /* if (unlikely(ret))
                pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
                        __func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
                        hwaddr, ret); */

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_build);
int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
                unsigned long tce)
{
        int ret;
        struct page *page = NULL;
        unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        ret = get_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE, &page);
        if (unlikely(ret != 1)) {
                /* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
                                tce, entry << IOMMU_PAGE_SHIFT, ret); */
                return -EFAULT;
        }
        hwaddr = (unsigned long) page_address(page) + offset;

        ret = iommu_tce_build(tbl, entry, hwaddr, direction);
        if (ret)
                put_page(page);

        if (ret < 0)
                pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
                        __func__, entry << IOMMU_PAGE_SHIFT, tce, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_put_tce_user_mode);
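
/*
 * Illustrative calling sequence for the SPAPR TCE helpers above (a sketch of
 * how a VFIO-style user of this API might map and later unmap one guest TCE;
 * ioba, tce and npages are hypothetical caller-supplied values):
 *
 *      if (!iommu_tce_put_param_check(tbl, ioba, tce))
 *              ret = iommu_put_tce_user_mode(tbl,
 *                              ioba >> IOMMU_PAGE_SHIFT, tce);
 *      ...
 *      if (!iommu_tce_clear_param_check(tbl, ioba, 0, npages))
 *              ret = iommu_clear_tces_and_put_pages(tbl,
 *                              ioba >> IOMMU_PAGE_SHIFT, npages);
 */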
int iommu_take_ownership(struct iommu_table *tbl)
{
        unsigned long sz = (tbl->it_size + 7) >> 3;

        if (tbl->it_offset == 0)
                clear_bit(0, tbl->it_map);

        if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
                pr_err("iommu_tce: it_map is not empty");
                return -EBUSY;
        }

        memset(tbl->it_map, 0xff, sz);
        iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
void iommu_release_ownership(struct iommu_table *tbl)
{
        unsigned long sz = (tbl->it_size + 7) >> 3;

        iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
        memset(tbl->it_map, 0, sz);

        /* Restore bit#0 set by iommu_init_table() */
        if (tbl->it_offset == 0)
                set_bit(0, tbl->it_map);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
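
/*
 * Ownership handoff, by example (a sketch): before handing the table to a
 * user-space driver an owner calls iommu_take_ownership(tbl), which fails
 * with -EBUSY if kernel mappings are still live; when the user is done,
 * iommu_release_ownership(tbl) clears any remaining TCEs and restores the
 * reserved bit for page 0.
 */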
static int iommu_add_device(struct device *dev)
{
        struct iommu_table *tbl;
        int ret = 0;

        if (WARN_ON(dev->iommu_group)) {
                pr_warn("iommu_tce: device %s is already in iommu group %d, skipping\n",
                                dev_name(dev),
                                iommu_group_id(dev->iommu_group));
                return -EBUSY;
        }

        tbl = get_iommu_table_base(dev);
        if (!tbl || !tbl->it_group) {
                pr_debug("iommu_tce: skipping device %s with no tbl\n",
                                dev_name(dev));
                return 0;
        }

        pr_debug("iommu_tce: adding %s to iommu group %d\n",
                        dev_name(dev), iommu_group_id(tbl->it_group));

        ret = iommu_group_add_device(tbl->it_group, dev);
        if (ret < 0)
                pr_err("iommu_tce: %s has not been added, ret=%d\n",
                                dev_name(dev), ret);

        return ret;
}
static void iommu_del_device(struct device *dev)
{
        iommu_group_remove_device(dev);
}
static int iommu_bus_notifier(struct notifier_block *nb,
                unsigned long action, void *data)
{
        struct device *dev = data;

        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                return iommu_add_device(dev);
        case BUS_NOTIFY_DEL_DEVICE:
                iommu_del_device(dev);
                return 0;
        default:
                return 0;
        }
}
static struct notifier_block tce_iommu_bus_nb = {
        .notifier_call = iommu_bus_notifier,
};
static int __init tce_iommu_init(void)
{
        struct pci_dev *pdev = NULL;

        BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);

        for_each_pci_dev(pdev)
                iommu_add_device(&pdev->dev);

        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
        return 0;
}

subsys_initcall_sync(tce_iommu_init);
#else

void iommu_register_group(struct iommu_table *tbl,
                int pci_domain_number, unsigned long pe_num)
{
}

#endif /* CONFIG_IOMMU_API */