#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
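
/*
 * Helpers used by the swiotlb-xen DMA layer: pages local to this
 * domain can use the native dma_ops directly, while foreign pages
 * (grant-mapped from another domain) must take the __xen_dma_*
 * paths declared below.
 */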
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs);

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
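
/*
 * The __xen_dma_* functions above are implemented out of line in the
 * arch Xen code (arch/arm/xen/); they handle the cache maintenance
 * for foreign pages, which the native dma_ops cannot do because a
 * foreign mfn does not correspond to a valid local pfn.
 */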
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
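
/*
 * Coherent allocations never involve foreign pages, so the two
 * helpers above can forward unconditionally to the native dma_ops:
 * with dom0 mapped 1:1, memory returned by the native allocator is
 * machine-contiguous and directly usable by devices.
 */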
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
	/* Dom0 is mapped 1:1, so if pfn == mfn the page is local, otherwise
	 * it is a foreign page grant-mapped in dom0. If the page is local we
	 * can safely call the native dma_ops function, otherwise we call
	 * the xen specific function. */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
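
/*
 * Illustrative sketch (not taken from this file): a caller such as
 * swiotlb-xen pairs the map/unmap helpers roughly like this:
 *
 *	dma_addr_t dev_addr = ...;	// bus address for the page
 *	xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 *	...
 *	xen_dma_unmap_page(hwdev, dev_addr + offset, size, dir, attrs);
 */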
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
	 * always return false. If the page is local we can safely call the
	 * native dma_ops function, otherwise we call the xen specific
	 * function. */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
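
/*
 * The two sync helpers below follow the same local/foreign split as
 * xen_dma_unmap_page(): pfn_valid() on PFN_DOWN(handle) is true only
 * for local pages, because dom0 is mapped 1:1 and a foreign mfn never
 * corresponds to a valid local pfn.
 */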
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */