/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dev_dma_ops)
		return dev->archdata.dev_dma_ops;
	return get_arch_dma_ops(NULL);
}
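/*
 * Out-of-line helpers used for pages that are foreign to this domain
 * (i.e. grant-mapped from another domain); on ARM they are provided by
 * the Xen DMA support code (arch/arm/xen/mm.c).
 */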
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
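/*
 * These wrappers are consumed by the Xen swiotlb code
 * (drivers/xen/swiotlb-xen.c). A minimal, hypothetical usage sketch
 * ("hwdev" and "size" assumed to be in scope at the caller):
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr = xen_alloc_coherent_pages(hwdev, size, &dma_handle,
 *					       GFP_KERNEL, 0);
 *	if (!vaddr)
 *		return NULL;
 *	...
 *	xen_free_coherent_pages(hwdev, size, vaddr, dma_handle, 0);
 */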
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1 << compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple
	 * Xen pages, it cannot contain a mix of local and foreign Xen
	 * pages, so if the first xen_pfn == mfn the page is local;
	 * otherwise it is a foreign page grant-mapped in dom0. If the
	 * page is local we can safely call the native dma_ops
	 * function, otherwise we call the Xen specific one.
	 */
	if (local)
		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1, and a Linux page cannot contain a mix of
	 * local and foreign Xen pages even when it spans multiple Xen
	 * pages. Because dom0 is mapped 1:1, calling pfn_valid on a
	 * foreign mfn always returns false. If the page is local we
	 * can safely call the native dma_ops function, otherwise we
	 * call the Xen specific one.
	 */
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->unmap_page)
			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
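/*
 * The two sync helpers below use the same pfn_valid() test as
 * xen_dma_unmap_page() above to tell local pages from foreign,
 * grant-mapped ones, and dispatch to the native or Xen specific
 * implementation accordingly.
 */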
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */