arch/powerpc/lib/dma-noncoherent.c
/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>
/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
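
/*
 * For illustration only, assuming a hypothetical CONFIG_CONSISTENT_START
 * of 0xff100000 and 4K pages (PAGE_SHIFT == 12):
 *
 *	CONSISTENT_OFFSET(0xff100000) == 0	first page of the region
 *	CONSISTENT_OFFSET(0xff103000) == 3	fourth page of the region
 *
 * i.e. CONSISTENT_OFFSET() turns a virtual address inside the consistent
 * region into an index into the pte page set up by dma_alloc_init() below.
 */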
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit; /* ISA default */

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_bus(page);

		do {
			BUG_ON(!pte_none(*pte));

			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
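
/*
 * For illustration only: drivers do not call __dma_alloc_coherent()
 * directly.  On a CONFIG_NOT_COHERENT_CACHE platform the generic DMA API
 * funnels into it, roughly:
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(dev, 4096, &bus, GFP_KERNEL);
 *	if (buf) {
 *		... program the device with 'bus', CPU accesses 'buf' ...
 *		dma_free_coherent(dev, 4096, buf, bus);
 *	}
 *
 * 'buf' is the uncached mapping created above (somewhere inside
 * CONSISTENT_BASE..CONSISTENT_END) and 'bus' is the page_to_bus()
 * value stored in *handle.
 */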
/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	return ret;
}

core_initcall(dma_alloc_init);
/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
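
/*
 * For illustration only (a sketch of how the streaming DMA API reaches
 * this function on a non-coherent platform):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * ends up invoking __dma_sync(buf, len, DMA_TO_DEVICE), i.e. a
 * clean_dcache_range(), so the device sees the CPU's latest writes.
 * A DMA_FROM_DEVICE mapping instead invalidates the range (or flushes
 * it when it is not cache-line aligned, see above) so that stale cache
 * lines cannot overwrite the data the device placed in memory.
 */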
#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
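
/*
 * For illustration only: __dma_sync_page() is the struct-page counterpart
 * used for page-based mappings, e.g. (sketch):
 *
 *	dma_addr_t bus = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
 *
 * The struct page form matters on CONFIG_HIGHMEM, where the page may have
 * no permanent kernel virtual address and must be kmapped one page at a
 * time, as __dma_sync_page_highmem() does above.
 */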