/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also provides a mechanism for obtaining contiguous pages for device
 * driver operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#ifndef CONFIG_X86
static unsigned long dma_alloc_coherent_mask(struct device *dev,
					     gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}
#endif

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

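/*
 * Illustrative note (not a helper used by this file): for an address that
 * belongs to the local domain, these conversions are inverses of each other,
 * e.g. xen_bus_to_phys(xen_phys_to_bus(paddr)) == paddr, since the offset
 * within the Xen page is preserved and pfn_to_bfn()/bfn_to_pfn() undo each
 * other for local pages.
 */
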
static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

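/*
 * The IO TLB buffer handed to us was allocated as ordinary guest memory, so
 * its underlying machine frames are neither guaranteed to be contiguous nor
 * to sit below the DMA mask. Exchange it, one IO_TLB_SEGSIZE-sized segment
 * at a time, for machine-contiguous memory via
 * xen_create_contiguous_region(), widening the address restriction up to
 * max_dma_bits if the hypervisor cannot satisfy a narrower one.
 */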
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

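/*
 * Pick the IO TLB size: honour a value already configured through
 * swiotlb_nr_tbl() (passed in as nr_tbl), otherwise default to 64MB worth
 * of slabs, rounded up to a multiple of IO_TLB_SEGSIZE.
 */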
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

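/*
 * Set up the Xen SWIOTLB: allocate the bounce buffer (from bootmem during
 * early boot, from the page allocator otherwise), exchange it for
 * machine-contiguous memory below 4GB via xen_swiotlb_fixup(), and register
 * it with the generic swiotlb code. On failure the requested size is halved
 * (down to a 2MB minimum) and the whole sequence is retried a few times.
 */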
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}

void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	if (((dev_addr + size - 1 > dma_mask)) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

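/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file): coherent allocations made through these hooks must be freed with
 * the matching size and handle, e.g.:
 *
 *	buf = xen_swiotlb_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL, 0);
 *	...
 *	xen_swiotlb_free_coherent(dev, size, buf, dma_handle, 0);
 */
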
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    (swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return DMA_ERROR_CODE;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
					 dev_addr,
					 map & ~PAGE_MASK,
					 sg->length,
					 dir, attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
					 dev_addr,
					 paddr & ~PAGE_MASK,
					 sg->length,
					 dir, attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);

int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);

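/*
 * Illustrative sketch only (hypothetical caller, not part of this file): a
 * driver whose device is limited to 32-bit DMA would typically do
 *
 *	if (xen_swiotlb_set_dma_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * before mapping any buffers, where pdev is the device being set up.
 */
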
/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (__generic_dma_ops(dev)->mmap)
		return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						    dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);

/*
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (__generic_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only, and should not go to production build
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON(!page_is_ram(bfn));
#endif
		return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							   handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);