/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DMA_MAPPING_H
#define _ASM_TILE_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/cache.h>
#include <linux/io.h>
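
/*
 * The available DMA operation tables: tile_dma_map_ops is the default;
 * the gx_pci variants are installed on TILE-Gx PCI devices, with the
 * "legacy" table used for devices limited to 32-bit DMA addressing
 * (see dma_set_mask() below).
 */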
extern struct dma_map_ops *tile_dma_map_ops;
extern struct dma_map_ops *gx_pci_dma_map_ops;
extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	else
		return tile_dma_map_ops;
}
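
/*
 * The per-device offset added to a physical address to produce the
 * corresponding DMA address (and subtracted to go back).
 */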
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	return dev->archdata.dma_offset;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	dev->archdata.dma_offset = off;
}
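
/* Convert between CPU physical addresses and device DMA addresses. */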
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}
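
/* Marking memory clean after DMA is a no-op on tile. */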
static inline void dma_mark_clean(void *addr, size_t size) {}

#include <asm-generic/dma-mapping-common.h>
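
/* Install a device-specific DMA ops table (see get_dma_ops() above). */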
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
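
/* Report whether [addr, addr + size) falls within the device's DMA mask. */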
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}
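
/* Check whether a DMA mapping attempt failed, via the device's ops table. */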
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return get_dma_ops(dev)->mapping_error(dev, dma_addr);
}
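
/* Ask the device's DMA ops whether the given mask is supportable. */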
static inline int
dma_supported(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->dma_supported(dev, mask);
}
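
/*
 * Set the device's DMA mask.  Legacy PCI devices that cannot address
 * all of memory are switched to the restricted legacy ops table and
 * have their mask capped at the maximum directly addressable DMA address.
 */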
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	/* Handle legacy PCI devices with limited memory addressability. */
	if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) {
		set_dma_ops(dev, gx_legacy_pci_dma_map_ops);
		set_dma_offset(dev, 0);
		if (mask > dev->archdata.max_direct_dma_addr)
			mask = dev->archdata.max_direct_dma_addr;
	}

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
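
/* Allocate a coherent DMA buffer through the device's ops table. */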
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	void *cpu_addr;

	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);

	return cpu_addr;
}
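
/* Free a coherent DMA buffer previously obtained from dma_alloc_attrs(). */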
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);

	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)

/*
 * dma_alloc_noncoherent() is #defined to return coherent memory,
 * so there's no need to do any flushing here.
 */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction direction)
{
}

#endif /* _ASM_TILE_DMA_MAPPING_H */