 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
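
/*
 * A minimal sketch of the consequence described above; the concrete frame
 * numbers are hypothetical and only illustrate why PFN arithmetic cannot
 * be used for DMA under Xen:
 *
 *	unsigned long pfn = 0x1000;
 *	unsigned long mfn  = pfn_to_mfn(pfn);	  // e.g. 0x80aff
 *	unsigned long mfn2 = pfn_to_mfn(pfn + 1); // need NOT be 0x80aff + 1
 *
 * Because adjacent PFNs may map to arbitrary MFNs, a buffer that is
 * contiguous in pseudo-physical space may be scattered in machine memory,
 * which is exactly what range_straddles_page_boundary() below detects.
 */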
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;
/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}
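
/*
 * A minimal sketch of the truncation the comment above warns about,
 * assuming a 32-bit phys_addr_t and a 64-bit dma_addr_t (the frame number
 * is hypothetical):
 *
 *	unsigned long bfn = 0x123456;
 *	dma_addr_t ok   = (dma_addr_t)bfn << XEN_PAGE_SHIFT; // 0x123456000
 *	phys_addr_t bad = XEN_PFN_PHYS(bfn);   // shifts within the 32-bit
 *					       // type: 0x23456000
 *
 * Casting to the wider type first preserves the high bits; XEN_PFN_PHYS
 * shifts as phys_addr_t and silently drops them.
 */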
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}
static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}
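
/*
 * Worked example (hypothetical addresses): with XEN_PAGE_SIZE == 4096, a
 * request with p == 0xffe0 and size == 0x40 has offset 0xfe0 and spills
 * 0x20 bytes into the next Xen page. That is only safe if pfn_to_bfn()
 * maps the two PFNs to adjacent BFNs, which is what
 * check_pages_physically_contiguous() verifies above.
 */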
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}
static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
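
/*
 * The inner loop above retries xen_create_contiguous_region() with a
 * progressively wider address restriction: dma_bits starts at the order of
 * one IO_TLB_SEGSIZE segment (18 with 4 KB pages and 2 KB slabs) and is
 * widened one bit at a time up to max_dma_bits. A hypothetical trace for
 * one segment on a fragmented host might look like:
 *
 *	rc = xen_create_contiguous_region(p, order, 18, &dma_handle); // fails
 *	rc = xen_create_contiguous_region(p, order, 19, &dma_handle); // fails
 *	...
 *	rc = xen_create_contiguous_region(p, order, 32, &dma_handle); // 0
 */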
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}
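
/*
 * Worked numbers: with the default IO_TLB_SHIFT of 11 (2 KB slabs),
 * passing nr_tbl == 0 yields the 64 MB default; 64 MB >> 11 == 32768
 * slabs, which is already a multiple of IO_TLB_SEGSIZE (128), and the
 * function returns 64 MB in bytes.
 */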
enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early) {
		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
						  PAGE_SIZE);
		if (!xen_io_tlb_start)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
	} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			memblock_free(__pa(xen_io_tlb_start),
				      PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's ideas of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to the physical address.
	 */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (((dev_addr + size - 1 <= dma_mask)) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    (swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return DMA_MAPPING_ERROR;
}
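
/*
 * Drivers never call this routine directly; it is reached through the
 * generic DMA API once xen_swiotlb_dma_ops is installed. A driver-side
 * sketch (names hypothetical, not from this file):
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */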
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 paddr, sg->length,
								 dir, attrs);
			if (map == DMA_MAPPING_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				 * to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
					 dev_addr, map & ~PAGE_MASK,
					 sg->length, dir, attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
					 dev_addr, paddr & ~PAGE_MASK,
					 sg->length, dir, attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
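
/*
 * Note the failure convention above: when the bounce buffer is exhausted
 * the routine unmaps the i entries mapped so far, zeroes sg_dma_len(sgl)
 * and returns 0, so callers of dma_map_sg() must check for a 0 return
 * rather than an error code. A driver-side sketch (not from this file):
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (n == 0)
 *		return -EIO;
 */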
/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
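
/*
 * Worked example: a device that can only drive 24 address lines would call
 * dma_set_mask(dev, DMA_BIT_MASK(24)); the check above then fails unless
 * the bus address of the very last byte of the bounce buffer
 * (xen_io_tlb_end - 1) is at or below 0x00ffffff.
 */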
/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#ifdef CONFIG_ARM
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						  dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#ifdef CONFIG_ARM
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only, and should not go to production build
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON(!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							 handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
};
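
/*
 * The ops table is wired up by architecture setup code, not by this file.
 * A minimal sketch of how a platform installs it (the exact hook varies by
 * architecture; the x86-style global assignment below is an assumption):
 *
 *	#include <xen/swiotlb-xen.h>
 *
 *	xen_swiotlb_init(1, true);	// early == true: boot-time allocation
 *	dma_ops = &xen_swiotlb_dma_ops;
 */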