arch/ppc/kernel/dma-mapping.c
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
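
/*
 * Usage sketch (illustrative, not taken from this file): drivers normally
 * reach these allocators through the generic DMA API rather than calling
 * the __dma_* variants directly.  With 'dev' standing for the driver's
 * struct device, the typical pattern is:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (cpu) {
 *		... program the device with 'bus', access the buffer via 'cpu' ...
 *		dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 *	}
 */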
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>

int map_page(unsigned long va, phys_addr_t pa, int flags);

#include <asm/tlbflush.h>
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
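
/*
 * Example of the index arithmetic (values are illustrative; the real base
 * comes from CONFIG_CONSISTENT_START): with 4K pages and a base of
 * 0xff100000, a consistent virtual address of 0xff103000 gives
 * CONSISTENT_OFFSET() == 3, i.e. the fourth entry of consistent_pte below.
 */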
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, int gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
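
/*
 * The scan above is a simple first-fit over the address-sorted region list.
 * For example (offsets relative to CONSISTENT_BASE, illustrative): with
 * existing regions [0x0000,0x3000) and [0x5000,0x6000), a 0x2000 byte
 * request fits the gap and becomes [0x3000,0x5000), while a 0x3000 byte
 * request does not fit the gap and is placed at [0x6000,0x9000).
 */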
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit; /* ISA default */

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_bus(page);

		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	spin_lock(&init_mm.page_table_lock);

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	spin_unlock(&init_mm.page_table_lock);

	return ret;
}

core_initcall(dma_alloc_init);
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:	/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
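
/*
 * Streaming-mapping sketch (illustrative): on the CONFIG_NOT_COHERENT_CACHE
 * platforms this file serves, the DMA API's streaming helpers are expected
 * to funnel into __dma_sync(), roughly:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	writeback before the device reads
 *	... device performs DMA ...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	invalidate before the CPU reads
 *
 * The actual call sites live in the dma_map_*()/dma_sync_*() wrappers,
 * not in this file.
 */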
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
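
/*
 * Worked example (illustrative, 4K pages): a 0x1000 byte buffer starting
 * at offset 0x800 into its first page spans two pages, so it is synced as
 * two segments of 0x800 bytes each, with one kmap_atomic()/kunmap_atomic()
 * pair per page.
 */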
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	/* First segment runs from 'offset' to the end of the page, or 'size' if smaller */
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	/* One segment for the (possibly partial) first page plus one per remaining page */
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
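
/*
 * Sketch of a typical caller (illustrative): a streaming page mapping on
 * these non-coherent platforms would sync before returning the bus address,
 * along the lines of
 *
 *	dma_addr_t dma_map_page(struct device *dev, struct page *page,
 *				unsigned long offset, size_t size,
 *				enum dma_data_direction dir)
 *	{
 *		__dma_sync_page(page, offset, size, dir);
 *		return page_to_bus(page) + offset;
 *	}
 *
 * The real wrapper lives in the architecture's dma-mapping.h header.
 */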