/* arch/arm/include/asm/xen/page-coherent.h */
#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
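
/*
 * Illustrative sketch, not part of this header: how a caller might pair
 * the two coherent-page helpers above. "dev" is a hypothetical,
 * already-probed struct device; a NULL attrs pointer means default
 * attributes.
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = xen_alloc_coherent_pages(dev, PAGE_SIZE, &dma_handle,
 *					    GFP_KERNEL, NULL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	xen_free_coherent_pages(dev, PAGE_SIZE, cpu_addr, dma_handle, NULL);
 */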
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
	/* Dom0 is mapped 1:1, so if pfn == mfn the page is local, otherwise
	 * it is a foreign page grant-mapped in dom0. If the page is local we
	 * can safely call the native dma_ops function, otherwise we call
	 * the xen specific function. */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
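
/*
 * Illustrative sketch, not part of this header: "dev", "page", "offset"
 * and "len" are hypothetical caller state, with offset + len inside one
 * page. In dom0's 1:1 mapping the bus address of a local page equals
 * its physical address:
 *
 *	dma_addr_t dev_addr = page_to_phys(page) + offset;
 *
 *	xen_dma_map_page(dev, page, dev_addr, offset, len,
 *			 DMA_TO_DEVICE, NULL);
 *
 * Here PFN_DOWN(dev_addr) == page_to_pfn(page), so the native dma_ops
 * map_page() hook runs; for a foreign page grant-mapped into dom0 the
 * mfn differs from the pfn and __xen_dma_map_page() is called instead.
 */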
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
	 * always return false. If the page is local we can safely call the
	 * native dma_ops function, otherwise we call the xen specific
	 * function. */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
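
/*
 * Illustrative sketch, pairing with the mapping example above (same
 * hypothetical "dev", "dev_addr" and "len"):
 *
 *	xen_dma_unmap_page(dev, dev_addr, len, DMA_TO_DEVICE, NULL);
 *
 * pfn_valid(PFN_DOWN(dev_addr)) is true for the local page, so the
 * native unmap_page() hook runs if the dma_ops provide one; for a
 * foreign grant mapping the handle's mfn is not a valid dom0 pfn and
 * __xen_dma_unmap_page() is used instead.
 */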
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
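
/*
 * Illustrative sketch, not part of this header: a hypothetical
 * streaming-DMA caller, where "dev", "handle" and "len" come from an
 * earlier xen_dma_map_page() with DMA_FROM_DEVICE. The sync pair
 * brackets CPU access to the buffer between device transfers:
 *
 *	xen_dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the freshly DMA'd data ...
 *	xen_dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */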
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */