drivers/xen/swiotlb-xen.c
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

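/*
 * Check whether the machine frames backing the Xen pages that cover
 * [offset, offset + length) are consecutive, i.e. whether the range is
 * contiguous from the device's point of view.
 */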
static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}

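/*
 * Report whether a bus address points into the swiotlb-xen bounce buffer,
 * so that unmap and sync know whether a bounce copy is required.
 */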
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

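/*
 * Exchange the pseudo-physical pages backing the IO TLB for machine memory
 * that is contiguous and addressable with dma_bits, working in
 * IO_TLB_SEGSIZE chunks. If Xen cannot satisfy a request, the address
 * restriction is relaxed one bit at a time, up to max_dma_bits.
 */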
static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

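/*
 * If the caller did not specify a slab count, default to 64MB worth of
 * slabs, aligned to IO_TLB_SEGSIZE. Returns the resulting IO TLB size in
 * bytes.
 */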
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		    "You either: don't have the permissions, do not have"\
		    " enough free memory under 4GB, or the hypervisor memory"\
		    " is too fragmented!";
	default:
		break;
	}
	return "";
}

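/*
 * Allocate the IO TLB (from bootmem when called early, otherwise from the
 * page allocator), swap it for machine-contiguous memory below 4GB via
 * xen_swiotlb_fixup(), and hand it to the generic swiotlb code. On failure
 * the buffer size is halved and the whole sequence is retried a few times
 * before giving up.
 */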
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}

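/*
 * Allocate a coherent buffer via the arch helper and make sure the machine
 * address handed back to the device fits its coherent DMA mask; if it does
 * not, or the buffer straddles a Xen page boundary, exchange it for a
 * machine-contiguous region first.
 */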
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	if (((dev_addr + size - 1 <= dma_mask)) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * dma address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    (swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return XEN_SWIOTLB_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return XEN_SWIOTLB_ERROR_CODE;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem page but we could
	 * call dma_mark_clean() with highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
					 dev_addr,
					 map & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
					 dev_addr,
					 paddr & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						  dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

/*
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only, and should not go to production build
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON (!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							 handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}

static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}

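/*
 * The DMA operations used when the kernel runs as a Xen guest that needs
 * bounce buffering; architecture setup code installs this table as the
 * device dma_ops.
 */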
const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
	.mapping_error = xen_swiotlb_mapping_error,
};