/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
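
/*
 * Worked example (illustrative values only, assuming CONSISTENT_DMA_SIZE
 * is SZ_2M, PAGE_SHIFT is 12 and PGDIR_SHIFT is 21):
 *
 *	CONSISTENT_BASE                  = 0xffe00000 - 0x200000 = 0xffc00000
 *	CONSISTENT_OFFSET(0xffc01000)    = 0x1000 >> 12 = 1  (second page)
 *	CONSISTENT_PTE_INDEX(0xffc01000) = 0x1000 >> 21 = 0  (first pte table)
 *	NUM_CONSISTENT_PTES              = 0x200000 >> 21 = 1
 */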
/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#x mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		dmac_flush_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte(pte, mk_pte(page, prot));
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			vma->vm_flags |= VM_RESERVED;
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);
int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);
/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0, i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
/*
 * Make an area consistent for devices.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);
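
/*
 * Example (hypothetical, not part of this file): maintaining a streaming
 * buffer around a DMA transfer.  "buf" and "len" are illustrative only,
 * and drivers normally reach this code through the dma_sync_* helpers in
 * dma-mapping.h rather than calling it directly:
 *
 *	consistent_sync(buf, len, DMA_TO_DEVICE);
 *		(writeback, so the device reads the CPU's latest data)
 *	... start DMA, wait for completion ...
 *	consistent_sync(buf, len, DMA_FROM_DEVICE);
 *		(invalidate, so the CPU reads what the device wrote)
 */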