#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
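/*
 * Xen/ARM glue for the swiotlb-xen DMA backend.  On ARM, grant-mapped
 * foreign pages may be cached, and the guest cannot always perform cache
 * maintenance on them by virtual address; instead it asks the hypervisor
 * to do it by bus address, via the GNTTABOP_cache_flush grant-table op.
 */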
/* Which kind of cache maintenance a DMA API call requires. */
enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
/* Set at init time if the hypervisor implements GNTTABOP_cache_flush. */
static bool hypercall_cflush = false;
/* functions called by SWIOTLB */
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/* Split the range into one GNTTABOP_cache_flush call per page. */
	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
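/*
 * Worked example (hypothetical values, 4K pages): handle = 0x80000000,
 * offset = 0xe00, size = 0x400.  The range crosses a page boundary, so
 * the loop issues two flushes: length 0x200 at offset 0xe00 of pfn
 * 0x80000, then length 0x200 at offset 0x0 of pfn 0x80001.
 */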
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}
static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
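/*
 * Direction-to-op summary: cpu_to_dev runs at map/sync-for-device time
 * (clean, or invalidate for DMA_FROM_DEVICE); dev_to_cpu runs at
 * unmap/sync-for-cpu time (invalidate unless the transfer was
 * DMA_TO_DEVICE, in which case no maintenance is needed).
 */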
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}
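/*
 * __xen_dma_unmap_page below follows the same pattern: maintenance is
 * skipped entirely for cache-coherent devices, and DMA_ATTR_SKIP_CPU_SYNC
 * is honoured when the caller has taken responsibility for synchronizing
 * the buffer itself.
 */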
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn)
{
	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
}
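/*
 * A bounce buffer is required only when all three conditions hold: the
 * hypervisor cannot flush caches on our behalf, the page is foreign
 * (pfn != mfn), and the device is not cache-coherent.  In that case the
 * kernel cannot perform the needed maintenance itself, so swiotlb-xen
 * bounces the transfer through local memory.
 */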
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
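/*
 * Unlike x86, no memory exchange with the hypervisor is needed here:
 * with dom0 assumed to be mapped 1:1, a physically contiguous buffer is
 * already machine-contiguous, and tearing one down is a no-op.
 */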
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL_GPL(xen_dma_ops);
static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};
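/*
 * Init-time probe: issue a harmless all-zero cache_flush request.  Any
 * return value other than -ENOSYS (including other error codes) proves
 * the hypervisor implements GNTTABOP_cache_flush, so dma_cache_maint()
 * can rely on it instead of forcing swiotlb bouncing.
 */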
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);