/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static inline unsigned long iommu_num_pages(unsigned long vaddr,
                                            unsigned long slen)
{
        unsigned long npages;

        npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
        npages >>= IOMMU_PAGE_SHIFT;

        return npages;
}

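/*
 * Worked example (illustrative, not from the original source): with 4K
 * IOMMU pages (IOMMU_PAGE_SHIFT == 12), a buffer at vaddr 0x1001 with
 * slen 0x2000 spans 0x1001..0x3000.  IOMMU_PAGE_ALIGN(0x3001) == 0x4000
 * and (0x1001 & IOMMU_PAGE_MASK) == 0x1000, so npages == 0x3000 >> 12
 * == 3: the partial first and last pages each cost a full TCE entry.
 */
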
static int __init setup_protect4gb(char *str)
{
        if (strcmp(str, "on") == 0)
                protect4gb = 1;
        else if (strcmp(str, "off") == 0)
                protect4gb = 0;

        return 1;
}

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;

        return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

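/*
 * These handlers parse kernel command line options; e.g. booting with
 * "iommu=novmerge" disables virtual merging and "protect4gb=off" turns
 * off the 4GB boundary protection, matching the strcmp checks above.
 */
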
static unsigned long iommu_range_alloc(struct device *dev,
                                       struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;
        unsigned long boundary_size;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only half of the table for small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

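        /*
         * Note (editorial): despite the "half" naming, iommu_init_table()
         * sets it_halfpoint at 3/4 of the table, so small allocations
         * search [0, it_halfpoint) while large ones (> 15 pages) are
         * steered into the remaining quarter to limit fragmentation.
         */
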
        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;

                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0.
                 */
                if ((start & mask) >= limit || pass > 0)
                        start = 0;
                else
                        start &= mask;
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << IOMMU_PAGE_SHIFT);
        else
                boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
        /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

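        /*
         * Note (editorial): iommu_area_alloc() below takes the boundary in
         * IOMMU pages, hence the IOMMU_PAGE_SHIFT conversion; e.g. a 4GB
         * boundary with 4K IOMMU pages becomes 0x100000 pages, and no
         * allocation is allowed to straddle such a boundary.
         */
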
        n = iommu_area_alloc(tbl->it_map, limit, start, npages,
                             tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
                             align_mask);

        if (n == -1) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        end = n + npages;

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

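/*
 * Worked example for the hint rounding above (illustrative): with
 * it_blocksize == 16 and an allocation ending at entry 0x23,
 * (0x23 + 15) & ~15 == 0x30, so the next small allocation starts on a
 * fresh block boundary.
 */
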
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
                              void *page, unsigned int npages,
                              enum dma_data_direction direction,
                              unsigned long mask, unsigned int align_order)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
                         direction);

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

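/*
 * Illustrative note for iommu_alloc(): with IOMMU_PAGE_SHIFT == 12, a
 * relocated entry of 0x100 yields bus address 0x100000; the low 12 bits
 * of ret stay zero because TCE mappings are IOMMU-page aligned, and the
 * caller adds any intra-page offset itself.
 */
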
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);
        iommu_area_free(tbl->it_map, free_entry, npages);

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                 int nelems, unsigned long mask,
                 enum dma_data_direction direction)
{
        struct iommu_table *tbl = dev->archdata.dma_data;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount, i;
        unsigned int align;
        unsigned long handle;
        unsigned int max_seg_size;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];

        incount = nelems;
        outcount = 1;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);

        max_seg_size = dma_get_max_seg_size(dev);
        for_each_sg(sglist, s, nelems, i) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;

                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long) sg_virt(s);
                npages = iommu_num_pages(vaddr, slen);
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                    (vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
                entry = iommu_range_alloc(dev, tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, align);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
                                       " npages %lx\n", tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);

                /* Insert into HW table */
                ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         * - combined segment would exceed the device's max segment size
                         */
                        if (novmerge || (dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %ux\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

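        /*
         * Merge example for the loop above (illustrative): two 4K segments
         * mapped back-to-back at bus addresses 0x1000 and 0x2000 satisfy
         * dma_addr == dma_next, so with vmerge enabled they come back as a
         * single 8K DMA segment instead of two.
         */
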
        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length);
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction)
{
        struct scatterlist *sg;
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        sg = sglist;
        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sg->dma_address;

                if (sg->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sg->dma_length);
                __iommu_free(tbl, dma_handle, npages);
                sg = sg_next(sg);
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        static int welcomed = 0;
        struct page *page;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

        page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

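        /*
         * Sizing example (illustrative): a table of 65536 TCE entries needs
         * (65536 + 7) >> 3 == 8192 bitmap bytes, i.e. an order-1 allocation
         * (two 4K pages).
         */
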
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long index;
                unsigned long tceval;
                unsigned long tcecount = 0;

                /*
                 * Reserve the existing mappings left by the first kernel.
                 */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }
                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; ");
                        printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
                                KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                                index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#else
        /* Clear the hardware table in case firmware left allocations in it */
        ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
                                node_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                                __FUNCTION__, node_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
                            void *vaddr, size_t size, unsigned long mask,
                            enum dma_data_direction direction)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        unsigned long uaddr;
        unsigned int npages, align;

        BUG_ON(direction == DMA_NONE);

        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size);

        if (tbl) {
                align = 0;
                if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
                    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
                        align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

                dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, align);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit()) {
                                printk(KERN_INFO "iommu_alloc failed, "
                                                "tbl %p vaddr %p npages %d\n",
                                                tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}

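/*
 * Hypothetical usage sketch (not part of the original file; "dev", "tbl",
 * "buf" and "len" are the caller's own):
 *
 *	dma_addr_t h = iommu_map_single(dev, tbl, buf, len,
 *					*dev->dma_mask, DMA_TO_DEVICE);
 *	if (h == DMA_ERROR_CODE)
 *		return -EIO;
 *	...
 *	iommu_unmap_single(tbl, h, len, DMA_TO_DEVICE);
 */
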
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction direction)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size);
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                           size_t size, dma_addr_t *dma_handle,
                           unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space. This is checked later
         * anyway. It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                printk("iommu_alloc_coherent size too large: 0x%lx\n", size);
                return NULL;
        }

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up TCEs to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> IOMMU_PAGE_SHIFT;
                iommu_free(tbl, dma_handle, nio_pages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}

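/*
 * Hypothetical usage sketch (not part of the original file): the coherent
 * pair is typically wrapped by a platform's dma_ops, roughly:
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = iommu_alloc_coherent(dev, tbl, size, &bus_addr,
 *					      dev->coherent_dma_mask,
 *					      GFP_KERNEL, node);
 *	...
 *	iommu_free_coherent(tbl, size, cpu_addr, bus_addr);
 */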