/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/dma.h>

#include "mmu_decl.h"
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
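
/*
 * Illustrative example (not from the original source): CONSISTENT_OFFSET()
 * simply turns a consistent-space virtual address into a page index within
 * the window above.  Assuming 4K pages and the default 2MB window:
 *
 *	CONSISTENT_OFFSET(CONSISTENT_BASE)		== 0
 *	CONSISTENT_OFFSET(CONSISTENT_BASE + PAGE_SIZE)	== 1
 *	CONSISTENT_OFFSET(CONSISTENT_END - PAGE_SIZE)	== 511
 */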
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};
static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
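
/*
 * consistent_head is only ever a list head: its vm_start/vm_end give the
 * window that allocations may be carved from, while the regions actually
 * handed out are kept on vm_list in ascending address order.  That ordering
 * is what lets ppc_vm_region_alloc() below find a free gap with a simple
 * first-fit scan.
 */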
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
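
/*
 * Illustrative sketch (not part of the original code): with the list empty,
 * a two-page request is carved from the bottom of the window, e.g.
 *
 *	struct ppc_vm_region *r;
 *
 *	r = ppc_vm_region_alloc(&consistent_head, 2 * PAGE_SIZE, GFP_KERNEL);
 *	if (r) {
 *		BUG_ON(r->vm_start != CONSISTENT_BASE);
 *		BUG_ON(r->vm_end != CONSISTENT_BASE + 2 * PAGE_SIZE);
 *	}
 *
 * A later allocation begins its scan at the end of that region, so the list
 * stays sorted and regions never overlap.
 */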
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
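	/*
	 * Worked example (added for clarity, not in the original): for a
	 * full 32-bit mask of 0xffffffff, (mask + 1) & ~mask gives a limit
	 * of 0x100000000, i.e. anything below 4GB is acceptable.  For a
	 * device limited to 28 bits (mask 0x0fffffff) the limit works out
	 * to 0x10000000, so requests of 256MB or more are rejected below.
	 */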
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);
	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;
	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}
	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

		do {
			SetPageReserved(page);
			map_kernel_page(vaddr, page_to_phys(page),
					pgprot_val(pgprot_noncached(PAGE_KERNEL)));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
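
/*
 * Usage sketch (illustrative only, not part of this file): a driver on a
 * CONFIG_NOT_COHERENT_CACHE platform normally reaches this code through the
 * generic dma_alloc_coherent()/dma_free_coherent() wrappers, roughly:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = __dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	__dma_free_coherent(PAGE_SIZE, cpu_addr);
 *
 * cpu_addr points into the uncached CONSISTENT_BASE window while dma_handle
 * is the physical address of the underlying pages.
 */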
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;
	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}
	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);
	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;
 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
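
/*
 * Usage sketch (illustrative, not part of the original): streaming DMA on a
 * non-coherent platform brackets each transfer with the matching direction.
 * For a buffer the device will read from:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 *
 * writes dirty cache lines back before the device reads, while for a buffer
 * the device has written into:
 *
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);
 *
 * discards stale cache lines.  The dma_map_*()/dma_sync_*() helpers issue
 * these calls on the driver's behalf.
 */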
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
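
/*
 * Worked example (added for clarity, assuming 4K pages): a sync of
 * offset = 2048, size = 4096 spans two pages.  seg_size starts as
 * min(4096 - 2048, 4096) = 2048 and nr_segs = 1 + (4096 - 2048 + 4095)/4096
 * = 2, so the first pass syncs bytes 2048..4095 of the first page and the
 * second pass (seg_offset reset to 0) syncs bytes 0..2047 of the next page.
 */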
#endif /* CONFIG_HIGHMEM */
/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
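
/*
 * Usage sketch (illustrative, not from this file): the streaming DMA map
 * path typically syncs one scatterlist entry at a time, along the lines of
 *
 *	__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 *
 * which works whether or not the page has a permanent kernel mapping.
 */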
/*
 * Return the PFN for a given cpu virtual address returned by
 * __dma_alloc_coherent. This is used by dma_mmap_coherent()
 */
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
	/* This should always be populated, so we don't test every
	 * level. If that fails, we'll have a nice crash which
	 * will be as good as a BUG_ON()
	 */
	pgd_t *pgd = pgd_offset_k(cpu_addr);
	pud_t *pud = pud_offset(pgd, cpu_addr);
	pmd_t *pmd = pmd_offset(pud, cpu_addr);
	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;
	return pte_pfn(*ptep);
}
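
/*
 * Usage sketch (illustrative, not part of this file): dma_mmap_coherent()
 * style code can use the returned PFN to map the uncached buffer into
 * userspace, roughly:
 *
 *	unsigned long pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 *			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */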