/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

#include "mmu_decl.h"		/* for map_page() */
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(IOREMAP_TOP)
#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
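
/*
 * Worked example (illustrative only, assuming 4K pages): a consistent
 * mapping three pages above the base, i.e. CONSISTENT_BASE + 3 * PAGE_SIZE,
 * gives CONSISTENT_OFFSET() == 3, the index of the third page of the
 * consistent region.
 */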
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};
static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
/*
 * Allocate a range of the consistent mapping area.  Regions are kept on a
 * list sorted by address; the first gap large enough for @size is used.
 */
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head,
						unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
		     gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	/* Might be useful if we ever have a real legacy DMA zone... */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

		do {
			SetPageReserved(page);
			map_page(vaddr, page_to_phys(page),
				 pgprot_noncached(PAGE_KERNEL));
			page++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		size = c->vm_end - c->vm_start;
	}

	addr = c->vm_start;
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
							       addr),
						    addr),
					 addr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, addr, ptep);
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				ClearPageReserved(page);
				__free_page(page);
			}
		}
		addr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
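
/*
 * Worked example of the segment arithmetic above (illustrative only,
 * assuming 4K pages): offset = 0x300 and size = 2 * PAGE_SIZE gives
 * seg_size = PAGE_SIZE - 0x300 = 3328 for the first segment and
 * nr_segs = 1 + ((8192 - 3328) + 4095) / 4096 = 3, so the buffer is
 * synced as three kmap_atomic() segments of 3328, 4096 and 768 bytes.
 */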
/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
/*
 * Return the PFN for a given cpu virtual address returned by
 * __dma_alloc_coherent. This is used by dma_mmap_coherent()
 */
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
	/* This should always be populated, so we don't test every
	 * level. If that fails, we'll have a nice crash which
	 * will be as good as a BUG_ON()
	 */
	pgd_t *pgd = pgd_offset_k(cpu_addr);
	pud_t *pud = pud_offset(pgd, cpu_addr);
	pmd_t *pmd = pmd_offset(pud, cpu_addr);
	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;
	return pte_pfn(*ptep);
}