/*
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

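/*
 * Sketch of the call path, for orientation (the actual glue lives in
 * <asm/dma-mapping.h> and is assumed here rather than shown): generic
 * dma_alloc_coherent()/dma_free_coherent() calls are forwarded to the
 * or1k_dma_*_coherent() routines below, and dma_map_page() and friends
 * land in or1k_map_page() etc.
 */
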
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int page_set_nocache(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

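/*
 * Note that the loop above walks the *physical* address range one dcache
 * block at a time; each mtspr() write hands SPR_DCBFR (the data cache
 * block flush register) the address of a block to flush, pushing any
 * dirty lines to memory before the page is accessed through its new
 * cache-inhibited mapping.
 */
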
static int page_clear_nocache(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

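/*
 * No dcache maintenance is needed when clearing the bit: while _PAGE_CI
 * was set, the pages were never cached through this mapping, so there is
 * nothing stale to write back (assuming no concurrent access through a
 * separate cached mapping).
 */
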
/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just allocates the pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 */
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	if (walk_page_range(va, va + size, &walk)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}

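/*
 * Illustrative use via the generic DMA API, from a hypothetical driver
 * (RING_BYTES and the surrounding names are assumptions, not part of
 * this file):
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
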
void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(va, va + size, &walk));

	free_pages_exact(vaddr, size);
}

dma_addr_t or1k_map_page(struct device *dev, struct page *page,
			 unsigned long offset, size_t size,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

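/*
 * Hypothetical streaming-DMA sequence showing how the direction argument
 * is meant to be used (driver names below are invented for illustration):
 *
 *	dma_addr_t h = dma_map_page(dev, pg, 0, len, DMA_TO_DEVICE);
 *	my_hw_start_tx(h, len);            (hypothetical device kick)
 *	... wait for the device to finish ...
 *	dma_unmap_page(dev, h, len, DMA_TO_DEVICE);
 *
 * DMA_TO_DEVICE flushes so the device observes prior CPU writes;
 * DMA_FROM_DEVICE invalidates so the CPU re-reads what the device wrote.
 */
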
void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		     size_t size, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	/* Nothing special to do here... */
}

void or1k_sync_single_for_cpu(struct device *dev,
			      dma_addr_t dma_handle, size_t size,
			      enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

void or1k_sync_single_for_device(struct device *dev,
				 dma_addr_t dma_handle, size_t size,
				 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

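/*
 * The intended pairing, as defined by the generic DMA API (a sketch, not
 * spelled out in this file): for a buffer the device writes, sync
 * "for_cpu" before the CPU reads it; once the CPU is done, sync
 * "for_device" before handing the buffer back, e.g.
 *
 *	dma_sync_single_for_cpu(dev, h, len, DMA_FROM_DEVICE);
 *	process_rx(buf, len);              (hypothetical consumer)
 *	dma_sync_single_for_device(dev, h, len, DMA_FROM_DEVICE);
 */
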
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
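
/*
 * fs_initcall() runs before the device-level initcalls, so dma-debug is
 * armed before drivers start mapping buffers. If 1 << 16 entries prove
 * too few, the pool size can be overridden at boot with the
 * dma_debug_entries= parameter (handled by the generic dma-debug code,
 * not here).
 */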