/*
 * DMA Mapping glue for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H

#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>
#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
/*
 * The dma_map_* API takes cpu addresses, which are kernel logical addresses
 * in the untranslated (0x8000_0000 based) address space. The dma address
 * (bus addr) ideally needs to be 0x0000_0000 based, hence these glue
 * routines. However, given that intermediate bus bridges can ignore the high
 * bit, we can do with these routines being no-ops.
 * If a platform/device comes up which strictly requires a 0 based bus addr
 * (e.g. AHB-PCI bridge on Angel4 board), then it can provide its own versions.
 */
#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))

#else
#include <plat/dma_addr.h>
#endif
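/*
 * Illustrative note (added, not from the original header): with the default
 * no-op glue above, a kernel logical address such as 0x8020_0000 is handed
 * to the device unchanged as the bus address, i.e.
 *
 *	plat_kernel_addr_to_dma(dev, (void *)0x80200000) == 0x80200000
 *
 * rather than being rebased to 0x0020_0000.
 */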
void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle);

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
		       dma_addr_t dma_handle);
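/*
 * Illustrative use of the coherent API by a hypothetical driver ("pdev",
 * "desc_ring" and "ring_dma" are assumed driver-side names, not part of
 * this header):
 *
 *	dma_addr_t ring_dma;
 *	void *desc_ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *					     &ring_dma, GFP_KERNEL);
 *	if (!desc_ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, desc_ring, ring_dma);
 *
 * Such a buffer needs no explicit cache maintenance, unlike the streaming
 * API further below.
 */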
/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);

#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
/*
 * streaming DMA Mapping API...
 * CPU accesses the page via its normal paddr, thus it needs to be explicitly
 * made consistent before each use.
 */

static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
					   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}
void __arc_dma_cache_sync(unsigned long paddr, size_t size,
			  enum dma_data_direction dir);

#define _dma_cache_sync(addr, sz, dir)				\
do {								\
	if (__builtin_constant_p(dir))				\
		__inline_dma_cache_sync(addr, sz, dir);		\
	else							\
		__arc_dma_cache_sync(addr, sz, dir);		\
} while (0)
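/*
 * Added commentary: when @dir is a compile-time constant,
 * __builtin_constant_p(dir) evaluates to 1 and the switch in
 * __inline_dma_cache_sync() folds down to the single relevant cache op.
 * A runtime-variable @dir instead calls the out-of-line
 * __arc_dma_cache_sync(), avoiding inlining the whole switch at every
 * call site.
 */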
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
	return plat_kernel_addr_to_dma(dev, cpu_addr);
}
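/*
 * Illustrative streaming-map sequence for a hypothetical TX buffer ("dev",
 * "buf" and "len" are assumed driver-side names):
 *
 *	dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -ENOMEM;
 *	... hand busaddr to the device, wait for completion ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 *
 * On ARC the map step writes back the CPU cache for DMA_TO_DEVICE; the
 * unmap below is a no-op.
 */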
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		 size_t size, enum dma_data_direction dir)
{
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + offset;
	return dma_map_single(dev, (void *)paddr, size, dir);
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
	       size_t size, enum dma_data_direction dir)
{
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
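/*
 * Illustrative scatter-gather mapping of a hypothetical request's sglist
 * ("sgl" and "nents" are assumed caller names):
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sgl, mapped, DMA_FROM_DEVICE);
 *
 * Each entry is mapped via dma_map_page() above, so per-entry cache
 * maintenance matches the single-buffer case.
 */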
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_FROM_DEVICE);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_TO_DEVICE);
}
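/*
 * Added commentary on the sync pair above: sync_*_for_cpu invalidates the
 * CPU cache (DMA_FROM_DEVICE) so the CPU sees data the device just wrote,
 * while sync_*_for_device writes dirty lines back (DMA_TO_DEVICE) before
 * handing the buffer to the device again. A hypothetical driver reusing a
 * single RX buffer ("handle", "len" assumed) might do, per iteration:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */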
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_FROM_DEVICE);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_TO_DEVICE);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}
static inline int dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
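/*
 * Illustrative probe-time use of dma_set_mask() below by a hypothetical
 * driver ("pdev" is an assumed name); the 32-bit mask is the only one
 * dma_supported() above accepts:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */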
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#endif