#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H
/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0
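
/*
 * DMA_ERROR_CODE is the dma_addr_t value used on this architecture to
 * flag a failed mapping; drivers should still check results with
 * dma_mapping_error() below rather than comparing against it directly.
 * The struct dma_map_ops actually used for a device comes from the
 * machine vector, see platform_dma_get_ops() / get_dma_ops() further down.
 */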
extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
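
/*
 * dma_alloc_attrs(): allocate a coherent DMA buffer via the machine
 * vector's dma_map_ops.  On success *daddr holds the bus address and the
 * CPU virtual address is returned; the allocation is also reported to
 * dma-debug.
 */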
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *daddr, gfp_t gfp,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	void *caddr;

	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
	return caddr;
}

#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
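
/*
 * dma_free_attrs(): release a buffer obtained from dma_alloc_attrs().
 * Both the CPU address and the bus address must be passed back; dma-debug
 * is told about the free before the platform ops release the memory.
 */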
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *caddr, dma_addr_t daddr,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	debug_dma_free_coherent(dev, size, caddr, daddr);
	ops->free(dev, size, caddr, daddr, attrs);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
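
/*
 * Illustrative usage sketch (the names and sizes below, e.g. "mydev" and
 * the 4096-byte length, are hypothetical):
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(&mydev->dev, 4096,
 *					    &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&mydev->dev, 4096, cpu_addr, bus_addr);
 */
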
#define get_dma_ops(dev) platform_dma_get_ops(dev)

#include <asm-generic/dma-mapping-common.h>
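
/*
 * dma_mapping_error(): report the check to dma-debug, then ask the
 * platform dma_map_ops whether @daddr marks a failed mapping.
 */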
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	debug_dma_mapping_error(dev, daddr);
	return ops->mapping_error(dev, daddr);
}
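
/*
 * dma_supported(): ask the platform dma_map_ops whether the device can
 * perform DMA with the given address mask.
 */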
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}
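
/*
 * dma_set_mask(): validate the requested mask with dma_supported() and
 * store it in dev->dma_mask; -EIO is returned if the device (or platform)
 * cannot honour it.
 */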
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
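
/*
 * dma_capable(): true when the buffer [addr, addr + size) lies entirely
 * within the device's DMA mask (and the device has a mask at all).
 */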
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}
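
/*
 * phys_to_dma()/dma_to_phys(): bus addresses and CPU physical addresses
 * are used interchangeably here, so both conversions are identity
 * mappings.
 */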
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */