arch/parisc/kernel/pci-dma.c
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>		/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif
/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
static int pa11_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
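/*
** DMA on these PA 1.1 platforms is direct-mapped: there is no IOMMU,
** so every device mask is accepted, and bus addresses are simply
** physical addresses (see virt_to_phys() in pa11_dma_map_single()
** below).
*/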
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		purge_tlb_start();
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end();
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}
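/*
** The three map_*_uncached() helpers above walk the kernel page table
** top-down (pgd -> pmd -> pte) and install PAGE_KERNEL_UNC entries for
** [vaddr, vaddr + size), consuming physical pages from *paddr_ptr one
** PAGE_SIZE step at a time. Each new PTE is followed by a TLB purge of
** the virtual address so no stale cached translation can be hit
** through this range.
*/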
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;
		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start();
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end();
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}
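/*
** The map is scanned one u8/u16/u32 word at a time, starting at the
** rotating pcxl_res_hint and wrapping to the start of the map if
** nothing is free past the hint. For example, a 3-page allocation
** scans u8 words for one whose low three bits (mask 0x07) are all
** clear, then claims them with *res_ptr |= mask. Because the mask is
** only ever tested against bit 0 of a word, an allocation never
** straddles two words, i.e. it is naturally aligned to the word that
** holds it.
*/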
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
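/*
** In pcxl_alloc_range() above, res_idx is a byte offset into the
** resource map, and each map byte covers 8 pages; shifting the index
** by (PAGE_SHIFT + 3) therefore yields the matching byte offset within
** the pcxl mapping area. pcxl_free_range() below inverts the same
** arithmetic to recover the index from a vaddr.
*/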
#define PCXL_FREE_MAPPINGS(idx, m, size) \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
	/* BUG_ON((*res_ptr & m) != m); */ \
	*res_ptr &= ~m;
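/*
** Note that the extra (((size >> 3) - 1) & ~((size >> 3) - 1)) term is
** of the form x & ~x and so is always zero: the lookup reduces to
** pcxl_res_map[idx]. idx is already aligned to the word size because
** the allocator only ever places a mask at a word boundary.
*/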
/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;	/* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "     total:    free:    used:  % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m,"\n   ");
		seq_printf(m, "%s %08lx", buf, *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}
static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = create_proc_entry("pcxl_dma", 0, proc_gsc_root);
		if (ent)
			ent->proc_fops = &proc_pcxl_dma_ops;
		else
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);
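/*
** Sizing in pcxl_dma_init(): PCXL_DMA_MAP_SIZE >> PAGE_SHIFT is the
** number of mappable pages, and one further >> 3 packs those
** page-state bits eight to a byte, giving the size of pcxl_res_map in
** bytes.
*/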
static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
	/* This probably isn't needed to support EISA cards.
	** ISA cards will certainly only support 24-bit DMA addressing.
	** Not clear if we can, want, or need to support ISA.
	*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}
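/*
** Consistent allocation flow: reserve a virtual range in the pcxl
** area, grab physical pages, flush any cached lines for them, then
** remap the physical pages uncached at the reserved vaddr. The caller
** gets the uncached vaddr while the device gets the physical address
** through *dma_handle.
*/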
static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}
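/*
** Streaming mappings here are just virt_to_phys() plus a cache flush:
** DMA on these machines is not cache-coherent, so dirty lines must be
** written back before the device reads memory, and stale lines must be
** purged before the CPU reads what the device wrote. The same pattern
** repeats in the sg and sync callbacks below.
*/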
static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();

	for (i = 0; i < nents; i++, sglist++ ) {
		unsigned long vaddr = sg_virt_addr(sglist);
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range(vaddr, sglist->length);
	}
	return nents;
}
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
	return;
}
static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	pa11_dma_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_consistent,
	.free_consistent =	pa11_dma_free_consistent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
static void *fail_alloc_consistent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return NULL;
}

static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	void *addr;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t iova)
{
	free_pages((unsigned long)vaddr, get_order(size));
	return;
}
struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	fail_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_noncoherent,
	.free_consistent =	pa11_dma_free_noncoherent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
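/*
** Two ops tables: pcxl_dma_ops serves processors that can map pages
** uncached (the PA7100LC/PA7300LC platforms noted in the header) and
** therefore offer true consistent allocations; pcx_dma_ops serves the
** remaining PA 1.1 CPUs, where alloc_consistent deliberately fails and
** drivers must fall back to noncoherent allocations plus explicit
** dma_sync_* cache flushes.
*/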