#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
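
/*
 * Dispatch helpers: pages owned by dom0 (mapped 1:1) go through the
 * native dma_ops, while foreign pages grant-mapped from another domain
 * take the __xen_dma_* paths declared below, which are implemented out
 * of line in the Xen arch support code (arch/arm/xen/mm.c).
 */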
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
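
/*
 * Coherent allocations always come from dom0's own memory, never from a
 * foreign grant mapping, so the two wrappers below can delegate straight
 * to the native dma_ops without any local/foreign check.
 */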
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
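
/*
 * Map/unmap and sync hooks called from the common swiotlb-xen code
 * (drivers/xen/swiotlb-xen.c): each one decides between the native
 * dma_ops and the Xen-specific path for foreign pages.
 */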
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it's not possible for it to contain a mix of local and
	 * foreign Xen pages. So if the first xen_pfn == mfn the page is
	 * local, otherwise it's a foreign page grant-mapped in dom0. If
	 * the page is local we can safely call the native dma_ops
	 * function, otherwise we call the xen specific function.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it's not possible for it to contain a mix of local and
	 * foreign Xen pages. Because dom0 is mapped 1:1, calling
	 * pfn_valid on a foreign mfn will always return false. If the
	 * page is local we can safely call the native dma_ops function,
	 * otherwise we call the xen specific function.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
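
/*
 * The sync helpers use the same local/foreign dispatch as unmap above:
 * dom0 is mapped 1:1, so pfn_valid() only succeeds for local pages,
 * and foreign grant-mapped pages fall through to the Xen-specific path.
 */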
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */