/*
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef __ASM_OPENRISC_DMA_MAPPING_H
#define __ASM_OPENRISC_DMA_MAPPING_H

/*
 * See Documentation/DMA-API-HOWTO.txt and
 * Documentation/DMA-API.txt for documentation.
 *
 * This file is written with the intention of eventually moving over
 * to largely using asm-generic/dma-mapping-common.h in its place.
 */

#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <linux/kmemcheck.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
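
/*
 * Arch-specific helpers backing the generic dma_* wrappers below.  They
 * are only declared here; the definitions are expected to live in the
 * OpenRISC DMA support code.
 */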
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag);
void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle);
dma_addr_t or1k_map_page(struct device *dev, struct page *page,
			 unsigned long offset, size_t size,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		     size_t size, enum dma_data_direction dir,
		     struct dma_attrs *attrs);
int or1k_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
		   int nents, enum dma_data_direction dir,
		   struct dma_attrs *attrs);
void or1k_sync_single_for_cpu(struct device *dev,
			      dma_addr_t dma_handle, size_t size,
			      enum dma_data_direction dir);
void or1k_sync_single_for_device(struct device *dev,
				 dma_addr_t dma_handle, size_t size,
				 enum dma_data_direction dir);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *memory;

	memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);

	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
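
/*
 * Example (illustrative only): a hypothetical driver allocating a
 * coherent descriptor ring.  "my_dev" and "ring" are assumed names,
 * not part of this header.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(my_dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, use "ring" from the CPU ...
 *	dma_free_coherent(my_dev, PAGE_SIZE, ring, ring_dma);
 */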

static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
{
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = or1k_map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);

	return addr;
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
				    size_t size,
				    enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
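
/*
 * Example (illustrative only): streaming mapping of a kernel buffer for
 * a single device-bound transfer.  "my_dev", "buf" and "len" are
 * assumed names, not part of this header.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	... point the device at "dma" and start the transfer ...
 *	dma_unmap_single(my_dev, dma, len, DMA_TO_DEVICE);
 */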

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = or1k_map_sg(dev, sg, nents, dir, NULL);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	or1k_unmap_sg(dev, sg, nents, dir, NULL);
}
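
/*
 * Example (illustrative only): mapping a scatterlist.  The number of
 * entries actually mapped (the return value) is what gets programmed
 * into the device, while the unmap call takes the original nents.
 * "my_dev", "sgl" and "nents" are assumed names.
 *
 *	int mapped;
 *
 *	mapped = dma_map_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (mapped == 0)
 *		return -EIO;
 *	... program the device with the "mapped" entries ...
 *	dma_unmap_sg(my_dev, sgl, nents, DMA_FROM_DEVICE);
 */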

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = or1k_map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
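
/*
 * Example (illustrative only): like dma_map_single(), but starting from
 * a struct page plus offset rather than a kernel virtual address.
 * "my_dev", "page", "off" and "len" are assumed names.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(my_dev, page, off, len, DMA_TO_DEVICE);
 *	... start the transfer ...
 *	dma_unmap_page(my_dev, dma, len, DMA_TO_DEVICE);
 */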

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
	or1k_sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
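
/*
 * Example (illustrative only): reusing one streaming mapping across
 * several transfers.  Ownership is handed back to the CPU before the
 * buffer is touched, and back to the device before the next DMA.
 * "my_dev", "dma" and "len" are assumed names.
 *
 *	dma_sync_single_for_cpu(my_dev, dma, len, DMA_FROM_DEVICE);
 *	... inspect or copy out the received data ...
 *	dma_sync_single_for_device(my_dev, dma, len, DMA_FROM_DEVICE);
 */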

static inline int dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
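
/*
 * Example (illustrative only): a driver's probe routine declaring its
 * addressing capability before doing any DMA.  Only a 32-bit mask is
 * accepted here (see dma_supported() above); "my_dev" is an assumed name.
 *
 *	if (dma_set_mask(my_dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */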

#endif /* __ASM_OPENRISC_DMA_MAPPING_H */