/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
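
/*
 * The two helpers below are pte_entry callbacks for walk_page_range():
 * they set or clear the cache-inhibit (_PAGE_CI) bit in every PTE that
 * covers a DMA buffer, then do the TLB and dcache maintenance needed for
 * the new page flags to take effect.
 */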

static int page_set_nocache(pte_t *pte, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        unsigned long cl;

        pte_val(*pte) |= _PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        /* Flush page out of dcache */
        for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBFR, cl);

        return 0;
}
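
/*
 * Cache maintenance in this file is done by writing a physical address to
 * the data cache block flush (SPR_DCBFR) or block invalidate (SPR_DCBIR)
 * special-purpose registers with mtspr(), stepping through the range one
 * dcache block (cpuinfo.dcache_block_size bytes) at a time.
 */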

static int page_clear_nocache(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pte_val(*pte) &= ~_PAGE_CI;

        /*
         * Flush the page out of the TLB so that the new page flags get
         * picked up next time there's an access
         */
        flush_tlb_page(NULL, addr);

        return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 */
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t *dma_handle, gfp_t gfp)
{
        unsigned long va;
        void *page;
        struct mm_walk walk = {
                .pte_entry = page_set_nocache,
                .mm = &init_mm
        };

        page = alloc_pages_exact(size, gfp);
        if (!page)
                return NULL;

        /* This gives us the real physical address of the first page. */
        *dma_handle = __pa(page);

        va = (unsigned long)page;

        /*
         * We need to iterate through the pages, clearing the dcache for
         * them and setting the cache-inhibit bit.
         */
        if (walk_page_range(va, va + size, &walk)) {
                free_pages_exact(page, size);
                return NULL;
        }

        return (void *)va;
}
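
/*
 * Illustrative usage only (not part of the original file): drivers do not
 * call or1k_dma_alloc_coherent() directly; they go through the generic DMA
 * API, which lands here on this port.  A typical caller looks roughly like:
 *
 *      dma_addr_t handle;
 *      void *buf;
 *
 *      buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ... hand "handle" to the device, access "buf" from the CPU ...
 *      dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */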

void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                            dma_addr_t dma_handle)
{
        unsigned long va = (unsigned long)vaddr;
        struct mm_walk walk = {
                .pte_entry = page_clear_nocache,
                .mm = &init_mm
        };

        /* walk_page_range shouldn't be able to fail here */
        WARN_ON(walk_page_range(va, va + size, &walk));

        free_pages_exact(vaddr, size);
}
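
/*
 * Streaming mappings: there is no bounce buffering here, so "mapping" a
 * page is just its physical address plus whatever dcache maintenance the
 * transfer direction requires.
 */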

dma_addr_t or1k_map_page(struct device *dev, struct page *page,
                         unsigned long offset, size_t size,
                         enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        unsigned long cl;
        dma_addr_t addr = page_to_phys(page) + offset;

        switch (dir) {
        case DMA_TO_DEVICE:
                /* Flush the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo.dcache_block_size)
                        mtspr(SPR_DCBFR, cl);
                break;
        case DMA_FROM_DEVICE:
                /* Invalidate the dcache for the requested range */
                for (cl = addr; cl < addr + size;
                     cl += cpuinfo.dcache_block_size)
                        mtspr(SPR_DCBIR, cl);
                break;
        default:
                /*
                 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
                 * flush nor invalidate the cache here as the area will need
                 * to be manually synced anyway.
                 */
                break;
        }

        return addr;
}

void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
                     size_t size, enum dma_data_direction dir,
                     struct dma_attrs *attrs)
{
        /* Nothing special to do here... */
}
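
/*
 * Scatter-gather variants: each segment is simply mapped or unmapped with
 * the single-page helpers above.
 */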

int or1k_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
                                               s->length, dir, NULL);
        }

        return nents;
}

void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
                   int nents, enum dma_data_direction dir,
                   struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
        }
}
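
/*
 * Explicit ownership transfers for streaming DMA: sync_single_for_cpu
 * invalidates the dcache over the buffer so the CPU sees what the device
 * wrote; sync_single_for_device flushes it so the device sees what the
 * CPU wrote.
 */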

void or1k_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction dir)
{
        unsigned long cl;
        dma_addr_t addr = dma_handle;

        /* Invalidate the dcache for the requested range */
        for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBIR, cl);
}

void or1k_sync_single_for_device(struct device *dev,
                                 dma_addr_t dma_handle, size_t size,
                                 enum dma_data_direction dir)
{
        unsigned long cl;
        dma_addr_t addr = dma_handle;

        /* Flush the dcache for the requested range */
        for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
                mtspr(SPR_DCBFR, cl);
}

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}

fs_initcall(dma_init);