/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
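
/*
 * A quick worked example of CONSISTENT_OFFSET() (the base address is purely
 * illustrative; the real value comes from CONFIG_CONSISTENT_START): with 4K
 * pages, CONSISTENT_BASE = 0xff100000 and x = 0xff103000,
 * CONSISTENT_OFFSET(x) = (0x3000 >> 12) = 3, i.e. the fourth pte in the
 * consistent_pte table set up by dma_alloc_init() below.
 */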
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit;	/* ISA default */

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_bus(page);

		do {
			BUG_ON(!pte_none(*pte));

			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	return ret;
}

core_initcall(dma_alloc_init);
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;

		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
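
/*
 * Worked example of the segmentation above (numbers are illustrative and
 * assume 4K pages): for offset = 0x800 and size = 0x2000,
 * seg_size = min(0x1000 - 0x800, 0x2000) = 0x800 and
 * nr_segs = 1 + ((0x2000 - 0x800) + 0xfff) / 0x1000 = 3, so the buffer is
 * synced as three kmap_atomic() segments of 0x800, 0x1000 and 0x800 bytes.
 */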
/*
 * __dma_sync_page() makes memory consistent. Identical to __dma_sync(), but
 * takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);