/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/dma-attrs.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       struct dma_attrs *attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
		/*
		 * We need to iterate through the pages, clearing the
		 * dcache for them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}
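
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * reaches or1k_dma_alloc()/or1k_dma_free() through the generic DMA API.
 * The device pointer "my_dev" and the buffer size are hypothetical.
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(my_dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// buf is cache-inhibited (uncached) on OR1K; handle is the
 *	// physical address to program into the device.
 *	dma_free_coherent(my_dev, PAGE_SIZE, buf, handle);
 */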
static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      struct dma_attrs *attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need
		 * to flush or invalidate the cache here as the area will
		 * need to be manually synced anyway.
		 */
		break;
	}

	return addr;
}
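
/*
 * Streaming-mapping sketch (illustrative only): or1k_map_page() is what
 * dma_map_single()/dma_map_page() end up calling.  "my_dev", "data" and
 * "len" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(my_dev, data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, dma))
 *		return -ENOMEM;
 *	// ... let the device read from "dma" ...
 *	dma_unmap_single(my_dev, dma, len, DMA_TO_DEVICE);
 */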
static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	/* Nothing special to do here... */
}

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, NULL);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s),
				dir, NULL);
	}
}
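
/*
 * Scatter-gather sketch (illustrative only): dma_map_sg() walks the
 * list and ends up calling or1k_map_page() per entry via or1k_map_sg().
 * The scatterlist setup below is hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (dma_map_sg(my_dev, &sg, 1, DMA_TO_DEVICE) == 0)
 *		return -ENOMEM;
 *	// ... device I/O using sg_dma_address(&sg) / sg_dma_len(&sg) ...
 *	dma_unmap_sg(my_dev, &sg, 1, DMA_TO_DEVICE);
 */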
static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}
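
/*
 * Sync sketch (illustrative only): for a DMA_BIDIRECTIONAL mapping,
 * which or1k_map_page() deliberately leaves alone, the driver brackets
 * each direction change with explicit syncs.  Names are hypothetical.
 *
 *	dma_sync_single_for_device(my_dev, dma, len, DMA_TO_DEVICE);
 *	// ... device reads the buffer ...
 *	dma_sync_single_for_cpu(my_dev, dma, len, DMA_FROM_DEVICE);
 *	// ... CPU reads what the device wrote ...
 */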
struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);
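
/*
 * How drivers reach this table (sketch of the arch glue, which lives in
 * <asm/dma-mapping.h>, not in this file): on OpenRISC of this era,
 * get_dma_ops() simply returns &or1k_dma_map_ops for every device, so
 * e.g. dma_map_single() resolves to or1k_map_page() above.  Roughly:
 *
 *	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 *	{
 *		return &or1k_dma_map_ops;
 *	}
 */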
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}

fs_initcall(dma_init);