// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
*/
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>		/* page_address() */
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>		/* virt_to_phys()/phys_to_virt() */
#include <asm/page.h>		/* get_order */
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}
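
/*
** Taken together, map_pte_uncached(), map_pmd_uncached() and
** map_uncached_pages() walk the kernel page tables top-down
** (pgd -> pmd -> pte) and install PAGE_KERNEL_UNC entries, making
** [vaddr, vaddr + size) an uncached alias of the physical range
** starting at paddr.  Each level clips its loop to the span covered
** by a single entry at the level above.
*/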
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
                       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
                       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}
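
/*
** Worked example (illustrative): for a 4-page request,
** pcxl_alloc_range() below builds mask = ~0UL >> (BITS_PER_LONG - 4)
** = 0xf and searches the map in u8 cells.  The first byte with its
** low four bits clear has those bits set, and idx becomes that byte's
** offset into pcxl_res_map.  Larger requests use u16/u32 cells, so
** every allocation stays within one naturally-aligned cell of the map.
*/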
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
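
/*
** Note on the return value: res_idx is a byte offset into the bitmap
** and each bitmap byte covers eight pages, so the block's offset into
** the mapping area is res_idx * 8 * PAGE_SIZE, i.e.
** res_idx << (PAGE_SHIFT + 3).  With 4 KB pages, res_idx == 3 yields
** pcxl_dma_start + 96 KB.
*/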
#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;
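
/*
** The index expression above is curious: (x & ~x) is always 0 for
** x = (size >> 3) - 1, so the pointer reduces to &pcxl_res_map[idx];
** presumably the form is a remnant of the alignment arithmetic used
** in PCXL_FIND_FREE_MAPPING.
*/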
/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "            total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m, "\n   ");
		seq_printf(m, "%s %08lx", buf, *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}
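
/*
** Illustrative /proc/gsc/pcxl_dma output (numbers are made up):
**
**   DMA Mapping Area size    : 16777216 bytes (4096 pages)
**   Resource bitmap : 512 bytes
**               total:    free:    used:   % used:
**   blocks        512      508        4        0%
**   pages        4096     4064       32        0%
*/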
static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}
static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);
static void *pa11_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}
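
/*
** Summary of the pa11 coherent allocation scheme: the buffer is
** allocated as ordinary cached pages, flushed, and then aliased
** uncached inside the pcxl mapping area.  The device is handed the
** physical address while the CPU uses the uncached alias, so no
** further synchronization is needed for coherent accesses.
*/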
static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	void *addr = page_address(page) + offset;

	BUG_ON(direction == DMA_NONE);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		flush_kernel_dcache_range((unsigned long) addr, size);

	return virt_to_phys(addr);
}
static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
}
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		unsigned long vaddr = (unsigned long)sg_virt(sg);

		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sg) = sg->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		flush_kernel_dcache_range(vaddr, sg->length);
	}
	return nents;
}
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
static void pa11_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	flush_kernel_dcache_range((unsigned long)vaddr, size);
}
const struct dma_map_ops pcxl_dma_ops = {
	.alloc =		pa11_dma_alloc,
	.free =			pa11_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
	.cache_sync =		pa11_dma_cache_sync,
};
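
/*
** Typical use from a driver goes through the generic DMA API, e.g.
** (illustrative only, not part of this file):
**
**	void *cpu_addr;
**	dma_addr_t bus_addr;
**
**	cpu_addr = dma_alloc_coherent(dev, 4096, &bus_addr, GFP_KERNEL);
**	...
**	dma_free_coherent(dev, 4096, cpu_addr, bus_addr);
**
** which lands in pa11_dma_alloc()/pa11_dma_free() when pcxl_dma_ops
** is installed for the platform.
*/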
static void *pcx_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	void *addr;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
		return NULL;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}
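
/*
** PCX-class machines have no way to create an uncached kernel alias,
** so only DMA_ATTR_NON_CONSISTENT allocations can be satisfied here;
** consistent requests fail with NULL, and callers are expected to
** bracket device accesses with explicit sync/cache_sync calls instead.
*/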
static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t iova, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
const struct dma_map_ops pcx_dma_ops = {
	.alloc =		pcx_dma_alloc,
	.free =			pcx_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
	.cache_sync =		pa11_dma_cache_sync,
};