/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DMA_MAPPING_H
#define _ASM_TILE_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/cache.h>
#include <linux/io.h>

#ifdef __tilegx__
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
#endif

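/*
 * DMA operation tables: the default tile dma_map_ops plus the TILE-Gx
 * PCI variants that dma_set_mask() below selects from, based on the
 * device's addressing capability.
 */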
extern struct dma_map_ops *tile_dma_map_ops;
extern struct dma_map_ops *gx_pci_dma_map_ops;
extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	else
		return tile_dma_map_ops;
}

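/*
 * Accessors for the per-device DMA address offset kept in dev->archdata
 * for use by the DMA ops.
 */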
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	return dev->archdata.dma_offset;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	dev->archdata.dma_offset = off;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

#include <asm-generic/dma-mapping-common.h>

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

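/* Report whether [addr, addr + size) fits within the device's DMA mask. */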
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return get_dma_ops(dev)->mapping_error(dev, dma_addr);
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->dma_supported(dev, mask);
}

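/*
 * Install a new DMA addressing mask, adjusting the dma_ops for TILE-Gx
 * PCI devices as described in the comment below.
 */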
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	/*
	 * For PCI devices with 64-bit DMA addressing capability, promote
	 * the dma_ops to hybrid, with the consistent memory DMA space limited
	 * to 32-bit. For 32-bit capable devices, limit the streaming DMA
	 * address range to max_direct_dma_addr.
	 */
	if (dma_ops == gx_pci_dma_map_ops ||
	    dma_ops == gx_hybrid_pci_dma_map_ops ||
	    dma_ops == gx_legacy_pci_dma_map_ops) {
		if (mask == DMA_BIT_MASK(64) &&
		    dma_ops == gx_legacy_pci_dma_map_ops)
			set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
		else if (mask > dev->archdata.max_direct_dma_addr)
			mask = dev->archdata.max_direct_dma_addr;
	}

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

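/*
 * Coherent allocation/free helpers: dispatch to the device's dma_map_ops
 * and report the result to the DMA debugging infrastructure.
 */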
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	void *cpu_addr;

	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);

	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);

	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)

/*
 * dma_alloc_noncoherent() is #defined to return coherent memory,
 * so there's no need to do any flushing here.
 */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction direction)
{
}

#endif /* _ASM_TILE_DMA_MAPPING_H */