arch/powerpc/mm/dma-noncoherent.c
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>

#include <asm/tlbflush.h>

#include "mmu_decl.h"
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE         (IOREMAP_TOP)
#define CONSISTENT_END          (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region    region;
 *    unsigned long       flags;
 *    struct page         **pages;
 *    unsigned int        nr_pages;
 *    unsigned long       phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *      .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start = VMALLOC_START,
 *      .vm_end = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.) I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
        struct list_head vm_list;
        unsigned long    vm_start;
        unsigned long    vm_end;
};

static struct ppc_vm_region consistent_head = {
        .vm_list  = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start = CONSISTENT_BASE,
        .vm_end   = CONSISTENT_END,
};
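/*
 * Carve a 'size'-byte hole out of the consistent window.  The regions on
 * head->vm_list are kept sorted by address, so the loop below is a simple
 * first-fit search through the gaps between existing allocations.
 */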
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct ppc_vm_region *c, *new;

        new = kmalloc(sizeof(struct ppc_vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;
                addr = c->vm_end;
                if (addr > end)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
        struct ppc_vm_region *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        struct ppc_vm_region *c;
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;
        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        goto no_page;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        goto no_page;
                }
        }
        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) ||
            size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
                       size, mask);
                return NULL;
        }
        order = get_order(size);

        /* Might be useful if we ever have a real legacy DMA zone... */
        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;
        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                flush_dcache_range(kaddr, kaddr + size);
        }
        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = ppc_vm_region_alloc(&consistent_head, size,
                                gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                unsigned long vaddr = c->vm_start;
                struct page *end = page + (1 << order);

                split_page(page, order);
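                /*
                 * split_page() turns the order-'order' allocation into
                 * independent order-0 pages so that the tail pages beyond
                 * 'size' can be returned individually further down.
                 */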
                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_phys(page);

                do {
                        SetPageReserved(page);
                        map_page(vaddr, page_to_phys(page),
                                 pgprot_noncached(PAGE_KERNEL));
                        page++;
                        vaddr += PAGE_SIZE;
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }
        if (page)
                __free_pages(page, order);
 no_page:
        return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
        struct ppc_vm_region *c;
        unsigned long flags, addr;

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);

        c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
        if (!c)
                goto no_area;

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }
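        /*
         * Walk the kernel page tables for every page in the region: clear
         * the uncached PTE and hand the underlying page back to the page
         * allocator.
         */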
        addr = c->vm_start;
        do {
                pte_t *ptep;
                unsigned long pfn;

                ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
                                                               addr),
                                                    addr),
                                         addr);
                if (!pte_none(*ptep) && pte_present(*ptep)) {
                        pfn = pte_pfn(*ptep);
                        pte_clear(&init_mm, addr, ptep);
                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                ClearPageReserved(page);
                                __free_page(page);
                        }
                }
                addr += PAGE_SIZE;
        } while (size -= PAGE_SIZE);
        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        list_del(&c->vm_list);

        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, vaddr);
        dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
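/*
 * Drivers do not normally call __dma_alloc_coherent()/__dma_free_coherent()
 * directly; on CONFIG_NOT_COHERENT_CACHE platforms they are reached through
 * the generic DMA API.  A minimal driver-side sketch (buffer name and size
 * below are illustrative, not taken from this file):
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(&pdev->dev, ring_bytes,
 *                                      &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, ring_bytes, ring, ring_dma);
 */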
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        switch (direction) {
        case DMA_NONE:
                BUG();
        case DMA_FROM_DEVICE:
                /*
                 * invalidate only when cache-line aligned otherwise there is
                 * the potential for discarding uncommitted data from the cache
                 */
                if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
                        flush_dcache_range(start, end);
                else
                        invalidate_dcache_range(start, end);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_range(start, end);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_range(start, end);
                break;
        }
}
EXPORT_SYMBOL(__dma_sync);
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
                unsigned long offset, size_t size, int direction)
{
        size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
        size_t cur_size = seg_size;
        unsigned long flags, start, seg_offset = offset;
        int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
        int seg_nr = 0;
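        /*
         * nr_segs counts the first (possibly partial) page plus however
         * many further pages, rounded up, are needed to cover the
         * remaining size - seg_size bytes.
         */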
        local_irq_save(flags);

        do {
                start = (unsigned long)kmap_atomic(page + seg_nr,
                                KM_PPC_SYNC_PAGE) + seg_offset;

                /* Sync this buffer segment */
                __dma_sync((void *)start, seg_size, direction);
                kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
                seg_nr++;

                /* Calculate next buffer segment size */
                seg_size = min((size_t)PAGE_SIZE, size - cur_size);

                /* Add the segment size to our running total */
                cur_size += seg_size;
                seg_offset = 0;
        } while (seg_nr < nr_segs);

        local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
 * __dma_sync_page() makes memory consistent. It is identical to
 * __dma_sync(), but takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
        __dma_sync_page_highmem(page, offset, size, direction);
#else
        unsigned long start = (unsigned long)page_address(page) + offset;
        __dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
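/*
 * On CONFIG_NOT_COHERENT_CACHE platforms the streaming DMA API is backed by
 * __dma_sync()/__dma_sync_page(): map/unmap and dma_sync_single_for_*()
 * calls end up here to write back and/or invalidate the buffer.  An
 * illustrative driver-side sequence (buffer and length names are made up):
 *
 *      dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... let the device read from busaddr ...
 *      dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */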
/*
 * Return the PFN for a given cpu virtual address returned by
 * __dma_alloc_coherent. This is used by dma_mmap_coherent()
 */
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
        /* This should always be populated, so we don't test every
         * level. If that fails, we'll have a nice crash which
         * will be as good as a BUG_ON()
         */
        pgd_t *pgd = pgd_offset_k(cpu_addr);
        pud_t *pud = pud_offset(pgd, cpu_addr);
        pmd_t *pmd = pmd_offset(pud, cpu_addr);
        pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);

        if (pte_none(*ptep) || !pte_present(*ptep))
                return 0;
        return pte_pfn(*ptep);
}