/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *   Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
int map_page(unsigned long va, phys_addr_t pa, int flags);

#include <asm/tlbflush.h>
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
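
/*
 * Worked example of CONSISTENT_OFFSET(), with hypothetical values (the
 * real base comes from Kconfig): if CONFIG_CONSISTENT_START were
 * 0xff100000 and PAGE_SHIFT is 12, a consistent mapping at 0xff103000
 * would give
 *
 *	CONSISTENT_OFFSET(0xff103000) = 0x3000 >> 12 = 3
 *
 * i.e. the fourth entry in the consistent_pte table declared below.
 */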
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, int gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
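
/*
 * A hypothetical walk through the first-fit search above: if the list
 * already holds [BASE, BASE+0x2000) and [BASE+0x5000, BASE+0x6000),
 * a request for 0x3000 bytes starts at addr = BASE, cannot fit before
 * the first region, advances addr to BASE+0x2000, fits before the
 * second region (BASE+0x2000 + 0x3000 <= BASE+0x5000), and so is
 * inserted as [BASE+0x2000, BASE+0x5000).
 */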
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit;	/* ISA default */

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_bus(page);

		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
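
/*
 * Usage sketch, illustrative only (the size, variable names and error
 * handling are assumptions, not taken from this file).  Callers
 * normally reach this through the dma_alloc_coherent() wrapper; a
 * direct call looks like:
 *
 *	dma_addr_t bus;
 *	void *ring;
 *
 *	ring = __dma_alloc_coherent(PAGE_SIZE, &bus, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *
 * The device is then programmed with 'bus' while the CPU uses the
 * uncached 'ring' mapping; the pair is released again with
 * __dma_free_coherent(PAGE_SIZE, ring).
 */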
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	spin_lock(&init_mm.page_table_lock);

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	spin_unlock(&init_mm.page_table_lock);

	return ret;
}

core_initcall(dma_alloc_init);
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:	/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
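
/*
 * Illustrative only (buffer names are assumptions, not from this
 * file): the streaming DMA wrappers on a non-coherent platform are
 * expected to call __dma_sync() around device accesses, e.g.
 *
 *	__dma_sync(tx_buf, len, DMA_TO_DEVICE);
 *		writes dirty lines back before the device reads tx_buf
 *	__dma_sync(rx_buf, len, DMA_FROM_DEVICE);
 *		discards cached lines so the CPU sees the data the
 *		device wrote into rx_buf
 */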
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	/* Number of pages the buffer touches: ceil((offset + size) / PAGE_SIZE) */
	int nr_segs = PAGE_ALIGN(offset + size) / PAGE_SIZE;
	int seg_nr = 0;
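
	/*
	 * Worked example with hypothetical values (PAGE_SIZE = 0x1000):
	 * offset = 0x200 and size = 0x1400 give nr_segs = 2; the loop
	 * below syncs 0xe00 bytes at page[0] + 0x200, then 0x600 bytes
	 * at the start of page[1].
	 */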
	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;

		/* Only the first segment can start mid-page */
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
 * __dma_sync_page makes memory consistent. Identical to __dma_sync, but
 * takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
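
/*
 * Illustrative only: a scatter/gather mapping routine on a
 * non-coherent platform would be expected to call this once per
 * segment, along the lines of (field and variable names follow the
 * 2.6-era struct scatterlist and are assumptions here):
 *
 *	for (i = 0; i < nents; i++, sg++)
 *		__dma_sync_page(sg->page, sg->offset, sg->length,
 *				direction);
 */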