arch/arm/xen/mm.c
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
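
/*
 * Allocate the pages backing the swiotlb-xen bounce buffer.  If any
 * memblock region starts below 4GB, pass __GFP_DMA so the allocation
 * comes from the DMA zone and stays addressable by 32-bit devices.
 */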
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}

enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */
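
/*
 * Perform cache maintenance on a DMA buffer via hypercall: walk it in
 * XEN_PAGE_SIZE chunks and issue a GNTTABOP_cache_flush for each one,
 * choosing clean or invalidate from the map/unmap direction.  Unmapping
 * a DMA_TO_DEVICE buffer needs no maintenance at all.
 */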
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long xen_pfn;
        size_t left = size;

        xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
        offset %= XEN_PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > XEN_PAGE_SIZE)
                        len = XEN_PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }
                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                xen_pfn++;
                left -= len;
        } while (left);
}
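
/*
 * Wrappers that split a bus address into the page-aligned handle and
 * offset expected by dma_cache_maint(), for the device-to-CPU (unmap)
 * and CPU-to-device (map) directions respectively.
 */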
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}
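
/*
 * Map-side hook called from the swiotlb-xen DMA ops: bring the CPU
 * caches into sync with memory before the device touches the buffer,
 * unless the device is coherent or DMA_ATTR_SKIP_CPU_SYNC is set.
 */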
void __xen_dma_map_page(struct device *hwdev, struct page *page,
                dma_addr_t dev_addr, unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}
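
/*
 * Unmap-side hook: discard stale cache lines so the CPU reads what the
 * device actually wrote, with the same coherent-device and
 * DMA_ATTR_SKIP_CPU_SYNC exceptions as the map path.
 */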
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}
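
/*
 * The sync_single_for_cpu/for_device counterparts reuse the same
 * helpers as unmap/map but have no attrs argument, so only the
 * coherent-device check applies.
 */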
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
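
/*
 * Tell the swiotlb-xen core whether this buffer must be bounced; the
 * comment in the body spells out the exact conditions.
 */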
bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr)
{
        unsigned int xen_pfn = XEN_PFN_DOWN(phys);
        unsigned int bfn = XEN_PFN_DOWN(dev_addr);

        /*
         * The swiotlb buffer should be used if all of the following hold:
         *      - Xen doesn't have the cache flush hypercall
         *      - the Linux page refers to foreign memory
         *      - the device doesn't support coherent DMA requests
         *
         * The Linux page may span multiple Xen pages, although it's not
         * possible to have a mix of local and foreign Xen pages.
         * Furthermore, range_straddles_page_boundary() already checks
         * whether the buffer is physically contiguous in host RAM.
         *
         * Therefore we only need to check the first Xen page to know if we
         * require a bounce buffer because the device doesn't support coherent
         * memory and we are not able to flush the cache.
         */
        return (!hypercall_cflush && (xen_pfn != bfn) &&
                !is_device_dma_coherent(dev));
}
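
/*
 * No memory exchange with the hypervisor is needed here: dom0 is
 * assumed to be mapped 1:1, so a physically contiguous region is
 * already bus-contiguous and its start address doubles as the DMA
 * handle.
 */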
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
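
/*
 * Nothing to tear down: xen_create_contiguous_region() above did not
 * exchange or remap anything.
 */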
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
        return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
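
/*
 * Dom0-only initialisation: set up swiotlb-xen, install its DMA ops as
 * xen_dma_ops, and probe for the cache flush hypercall by issuing a
 * zero-length GNTTABOP_cache_flush.  Any result other than -ENOSYS
 * means the hypercall is implemented.
 */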
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;
        if (!xen_initial_domain())
                return 0;
        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);