[PATCH] powerpc: trivial: modify comments to refer to new location of files
[linux-2.6/verdex.git] / arch/powerpc/kernel/iommu.c
blob d9a7fdef59b98234b3cbfddd378a84c12f9c7286
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif
static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);
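
/*
 * Virtual merging can also be toggled at boot time, independent of
 * CONFIG_IOMMU_VMERGE, via the "iommu=" command line option parsed by
 * setup_iommu() above. For example:
 *
 *      iommu=novmerge          -> novmerge = 1 (virtual merging disabled)
 *      iommu=vmerge            -> novmerge = 0 (virtual merging enabled)
 */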
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned int align_order)
{
        unsigned long n, end, i, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);

        /* This allocator was derived from x86_64's bit string search */
        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }
        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only half of the table for small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;
again:

        n = find_next_zero_bit(tbl->it_map, limit, start);

        /* Align allocation */
        n = (n + align_mask) & ~align_mask;

        end = n + npages;

        if (unlikely(end >= limit)) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }
        for (i = n; i < end; i++)
                if (test_bit(i, tbl->it_map)) {
                        start = i+1;
                        goto again;
                }

        for (i = n; i < end; i++)
                __set_bit(i, tbl->it_map);

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                                ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}
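
/*
 * A small worked example of the alignment logic above (illustrative
 * values only): with align_order = 2, align_mask = 0x3, so a candidate
 * entry n = 5 is rounded up to (5 + 3) & ~3 = 8, i.e. the allocation
 * starts on a 4-page boundary. Allocations of more than 15 pages are
 * treated as "large": they start their search at it_halfpoint, while
 * small allocations are first limited to the region below it, and the
 * other half is only rescanned on failure. This keeps small requests
 * from fragmenting the space reserved for big ones.
 */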
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
                              unsigned int npages, enum dma_data_direction direction,
                              unsigned int align_order)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(tbl, npages, NULL, align_order);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << PAGE_SHIFT;      /* Set the return dma address */

        /* Put the TCEs in the HW table */
        ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
                         direction);

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long i;

        entry = dma_addr >> PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);

        for (i = 0; i < npages; i++)
                __clear_bit(free_entry+i, tbl->it_map);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 struct scatterlist *sglist, int nelems,
                 enum dma_data_direction direction)
{
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount;
        unsigned long handle;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);
        for (s = outs; nelems; nelems--, s++) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long)page_address(s->page) + s->offset;
                npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
                npages >>= PAGE_SHIFT;
                entry = iommu_range_alloc(tbl, npages, &handle, 0);

                DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
                                       " npages %lx\n", tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << PAGE_SHIFT;
                dma_addr |= s->offset;

                DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
                    npages, entry, dma_addr);

                /* Insert into HW table */
                ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG(" - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++; outs++;
                                DBG(" can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG(" merged, new len: %lx\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG(" - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG(" - dma next is: %lx\n", dma_next);
        }
        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs++;
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;
failure:
        for (s = &sglist[0]; s <= outs; s++) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & PAGE_MASK;
                        npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
                                 >> PAGE_SHIFT;
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}
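
/*
 * A minimal usage sketch for iommu_map_sg()/iommu_unmap_sg()
 * (hypothetical driver code, not part of this file). "dev", "tbl",
 * "sglist" and "nents" are assumed to come from the caller, and
 * program_hw() is a stand-in for device-specific setup:
 *
 *      int i, mapped;
 *
 *      mapped = iommu_map_sg(dev, tbl, sglist, nents, DMA_TO_DEVICE);
 *      if (mapped == 0)
 *              return -EIO;            // nothing could be mapped
 *      for (i = 0; i < mapped; i++)
 *              program_hw(sglist[i].dma_address, sglist[i].dma_length);
 *      ...
 *      iommu_unmap_sg(tbl, sglist, nents, DMA_TO_DEVICE);
 *
 * Note that the returned count can be smaller than nents when virtual
 * merging coalesces adjacent segments.
 */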
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                    int nelems, enum dma_data_direction direction)
{
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sglist->dma_address;

                if (sglist->dma_length == 0)
                        break;
                npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
                          - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
                __iommu_free(tbl, dma_handle, npages);
                sglist++;
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl)
{
        unsigned long sz;
        static int welcomed = 0;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

        tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
        if (!tbl->it_map)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);

        memset(tbl->it_map, 0, sz);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

        /* Clear the hardware table in case firmware left allocations in it */
        ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}
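
/*
 * A minimal setup sketch (hypothetical platform code, using only the
 * iommu_table fields referenced elsewhere in this file): the platform
 * allocates and describes the table, then hands it to iommu_init_table()
 * which builds the allocation bitmap.
 *
 *      struct iommu_table *tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
 *
 *      tbl->it_busno     = busno;              // bus this table serves
 *      tbl->it_offset    = first_entry;        // first TCE entry number
 *      tbl->it_size      = num_entries;        // table size in entries
 *      tbl->it_blocksize = 16;                 // hint granularity for small allocs
 *      tbl->it_index     = index;              // platform-specific index
 *
 *      pdn->iommu_table = iommu_init_table(tbl);
 *
 * Values such as busno, first_entry and num_entries are placeholders;
 * real platforms derive them from firmware/device-tree properties.
 */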
void iommu_free_table(struct device_node *dn)
{
        struct pci_dn *pdn = dn->data;
        struct iommu_table *tbl = pdn->iommu_table;
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
                       dn->full_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                               __FUNCTION__, dn->full_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address of the buffer
 * passed here is the kernel (virtual) address of the buffer. The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
                            size_t size, enum dma_data_direction direction)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        unsigned long uaddr;
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        uaddr = (unsigned long)vaddr;
        npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
        npages >>= PAGE_SHIFT;

        if (tbl) {
                dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit()) {
                                printk(KERN_INFO "iommu_alloc failed, "
                                       "tbl %p vaddr %p npages %d\n",
                                       tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~PAGE_MASK);
        }

        return dma_handle;
}
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (tbl)
                iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
                           (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
}
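
/*
 * A minimal usage sketch (hypothetical driver code): mapping a kernel
 * buffer for a single DMA transfer and tearing the mapping down again.
 * "tbl", "buf" and "len" are assumed to come from the caller, and
 * start_dma() is a stand-in for programming the device:
 *
 *      dma_addr_t handle;
 *
 *      handle = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);
 *      if (handle == DMA_ERROR_CODE)
 *              return -EIO;
 *      start_dma(handle, len);
 *      ...
 *      iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
 *
 * The returned handle points at the same byte offset within the page as
 * the original virtual address, so no extra offset bookkeeping is needed.
 */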
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int npages, order;

        size = PAGE_ALIGN(size);
        npages = size >> PAGE_SHIFT;
        order = get_order(size);
        /*
         * Client asked for way too much space. This is checked later
         * anyway. It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                printk("iommu_alloc_coherent size too large: 0x%lx\n", size);
                return NULL;
        }
        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        ret = (void *)__get_free_pages(flag, order);
        if (!ret)
                return NULL;
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                ret = NULL;
        } else
                *dma_handle = mapping;
        return ret;
}
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        unsigned int npages;

        if (tbl) {
                size = PAGE_ALIGN(size);
                npages = size >> PAGE_SHIFT;
                iommu_free(tbl, dma_handle, npages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}
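
/*
 * A minimal usage sketch (hypothetical driver code): allocating a
 * persistent, bidirectional DMA buffer, e.g. for a ring of hardware
 * descriptors, and releasing it again. "tbl" and RING_BYTES are assumed
 * to be supplied by the caller:
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = iommu_alloc_coherent(tbl, RING_BYTES, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      iommu_free_coherent(tbl, RING_BYTES, ring, ring_dma);
 */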