#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
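/*
 * If any RAM lives below the 32-bit boundary, allocate the SWIOTLB
 * bounce buffer from ZONE_DMA so it stays reachable by devices with a
 * 32-bit DMA mask.
 */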
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}
enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};
static bool hypercall_cflush = false;
/* functions called by SWIOTLB */
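/*
 * Cache maintenance on foreign pages cannot be done with CPU
 * instructions alone, so dma_cache_maint() walks the buffer one Xen
 * page at a time and asks the hypervisor to clean or invalidate each
 * piece through the GNTTABOP_cache_flush hypercall.
 */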
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long xen_pfn;
        size_t left = size;

        xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
        offset %= XEN_PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > XEN_PAGE_SIZE)
                        len = XEN_PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }

                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                xen_pfn++;
                left -= len;
        } while (left);
}
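/*
 * Map the DMA API directions onto the two cache operations: cpu_to_dev
 * (DMA_MAP) publishes CPU writes before the device reads the buffer,
 * dev_to_cpu (DMA_UNMAP) discards stale cachelines once the device has
 * written to it.
 */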
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}
static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
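/*
 * Entry points called by the Xen SWIOTLB DMA ops.  Coherent devices
 * need no cache maintenance, and DMA_ATTR_SKIP_CPU_SYNC lets callers
 * opt out of it explicitly, so both cases return early.
 */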
void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr)
{
        unsigned int xen_pfn = XEN_PFN_DOWN(phys);
        unsigned int bfn = XEN_PFN_DOWN(dev_addr);

        /*
         * The swiotlb buffer should be used if
         *      - Xen doesn't have the cache flush hypercall
         *      - The Linux page refers to foreign memory
         *      - The device doesn't support coherent DMA requests
         *
         * A Linux page may span multiple Xen pages, although it's not
         * possible to have a mix of local and foreign Xen pages.
         * Furthermore, range_straddles_page_boundary already checks
         * whether the buffer is physically contiguous in host RAM.
         *
         * Therefore we only need to check the first Xen page to know if we
         * require a bounce buffer because the device doesn't support coherent
         * memory and we are not able to flush the cache.
         */
        return (!hypercall_cflush && (xen_pfn != bfn) &&
                !is_device_dma_coherent(dev));
}
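/*
 * Dom0 on Arm is assumed to be mapped 1:1 (see the comment below), so a
 * machine-contiguous region requires no actual work: the physical
 * address already serves as the DMA address, and there is nothing to
 * tear down afterwards.
 */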
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
        return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
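/*
 * Probe for the cache flush hypercall with a zero-length flush: any
 * return value other than -ENOSYS means the hypervisor implements
 * GNTTABOP_cache_flush and dma_cache_maint() can rely on it.
 */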
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;
        if (!xen_initial_domain())
                return 0;
        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);