/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
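
/*
 * A hedged illustration of why this matters for DMA (the numbers are made
 * up): a guest may see PFN 0x1000 and PFN 0x1001 as adjacent, yet
 * pfn_to_bfn(0x1000) could return MFN 0x80000 while pfn_to_bfn(0x1001)
 * returns MFN 0x23000. A device given the bus address derived from
 * PFN 0x1000 would therefore *not* reach the memory backing PFN 0x1001 by
 * incrementing the address, which is why contiguity must be checked (see
 * range_straddles_page_boundary() below) or machine-contiguous regions
 * must be created via xen_create_contiguous_region().
 */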
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;
/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}
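
/*
 * A minimal sketch of the truncation hazard described above (illustrative
 * only): with a 32-bit phys_addr_t, an expression such as
 *
 *	dma_addr_t dma = XEN_PFN_PHYS(bfn);	// shift done in 32 bits
 *
 * would shift the frame number left by XEN_PAGE_SHIFT *before* widening
 * to 64 bits, silently dropping any bits above bit 31. Casting the frame
 * number to dma_addr_t first, as xen_phys_to_bus() does above, keeps the
 * full value.
 */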
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}
static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}
static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}
enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		    "You either: don't have the permissions, do not have"\
		    " enough free memory under 4GB, or the hypervisor memory"\
		    " is too fragmented!";
	default:
		break;
	}
	return "";
}
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
			 verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
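
/*
 * A worked example of the retry logic above (assuming the usual
 * IO_TLB_SHIFT of 11, i.e. 2 KB slabs): the default request is 64 MB, or
 * 32768 slabs; if Xen cannot supply that much machine-contiguous memory
 * under the DMA mask, each retry halves the request (64 MB -> 32 MB ->
 * 16 MB), bottoming out at the 1024-slab / 2 MB minimum before giving up.
 */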
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
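
/*
 * A minimal driver-side sketch of how the two helpers above are reached
 * (illustrative only; "dev" and the sizes are hypothetical). Drivers never
 * call xen_swiotlb_* directly; they go through the generic DMA API, which
 * dispatches via xen_swiotlb_dma_ops at the bottom of this file:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, 4096, &dma, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	// ... program the device with "dma", access "cpu" from the CPU ...
 *	dma_free_coherent(dev, 4096, cpu, dma);
 */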
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    (swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return XEN_SWIOTLB_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return XEN_SWIOTLB_ERROR_CODE;
}
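
/*
 * A hedged driver-side sketch of the streaming path above (names such as
 * "dev", "page" and the length are hypothetical). The generic DMA API is
 * the entry point; on error the mapping must be checked before use:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... hand "dma" to the device and start the transfer ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * dma_mapping_error() lands in xen_swiotlb_mapping_error() below, which
 * recognizes XEN_SWIOTLB_ERROR_CODE.
 */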
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}
static void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
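
/*
 * A minimal sketch of the ownership dance the comment above describes
 * (illustrative; "dev", "dma" and "len" are hypothetical). The buffer
 * stays mapped across several device transfers:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the CPU may now inspect the buffer contents ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the device owns the buffer again and may DMA into it ...
 */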
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 paddr,
								 sg->length,
								 dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
					 dev_addr,
					 map & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
					 dev_addr,
					 paddr & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
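
/*
 * A hedged sketch of scatter-gather mapping from a driver's perspective
 * (illustrative; "dev", "sgl" and "nents" are hypothetical):
 *
 *	struct scatterlist *sg;
 *	int i, mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i) {
 *		// program one descriptor per mapped segment
 *		setup_desc(sg_dma_address(sg), sg_dma_len(sg));
 *	}
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * setup_desc() is a stand-in for whatever descriptor setup the device
 * requires.
 */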
/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}
static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
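
/*
 * For instance, a driver for such a 24-bit device would typically call
 * dma_set_mask(dev, DMA_BIT_MASK(24)) during probe ("dev" is hypothetical
 * here); the check above then succeeds only if the entire bounce buffer
 * sits below 16 MB in machine address space.
 */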
/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						  dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
/*
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
		/*
		 * This check verifies that the page belongs to the current
		 * domain and is not one mapped from another domain.
		 * This check is for debug only, and should not go to
		 * production build.
		 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON(!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							 handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}
static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}
const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
	.mapping_error = xen_swiotlb_mapping_error,
};