1 /* Glue code to lib/swiotlb.c */
4 #include <linux/cache.h>
5 #include <linux/module.h>
6 #include <linux/swiotlb.h>
7 #include <linux/bootmem.h>
8 #include <linux/dma-mapping.h>
10 #include <asm/iommu.h>
11 #include <asm/swiotlb.h>
/*
 * Non-zero when the SWIOTLB bounce-buffer path is in use for this boot.
 * Set in pci_swiotlb_init() below; marked __read_mostly since it is
 * written once at init and read on DMA-mapping paths thereafter.
 */
int swiotlb __read_mostly;
16 void * __init
swiotlb_alloc_boot(size_t size
, unsigned long nslabs
)
18 return alloc_bootmem_low_pages(size
);
21 void *swiotlb_alloc(unsigned order
, unsigned long nslabs
)
23 return (void *)__get_free_pages(GFP_DMA
| __GFP_NOWARN
, order
);
26 dma_addr_t
swiotlb_phys_to_bus(struct device
*hwdev
, phys_addr_t paddr
)
31 phys_addr_t
swiotlb_bus_to_phys(struct device
*hwdev
, dma_addr_t baddr
)
36 int __weak
swiotlb_arch_range_needs_mapping(phys_addr_t paddr
, size_t size
)
41 static void *x86_swiotlb_alloc_coherent(struct device
*hwdev
, size_t size
,
42 dma_addr_t
*dma_handle
, gfp_t flags
)
46 vaddr
= dma_generic_alloc_coherent(hwdev
, size
, dma_handle
, flags
);
50 return swiotlb_alloc_coherent(hwdev
, size
, dma_handle
, flags
);
53 static struct dma_map_ops swiotlb_dma_ops
= {
54 .mapping_error
= swiotlb_dma_mapping_error
,
55 .alloc_coherent
= x86_swiotlb_alloc_coherent
,
56 .free_coherent
= swiotlb_free_coherent
,
57 .sync_single_for_cpu
= swiotlb_sync_single_for_cpu
,
58 .sync_single_for_device
= swiotlb_sync_single_for_device
,
59 .sync_single_range_for_cpu
= swiotlb_sync_single_range_for_cpu
,
60 .sync_single_range_for_device
= swiotlb_sync_single_range_for_device
,
61 .sync_sg_for_cpu
= swiotlb_sync_sg_for_cpu
,
62 .sync_sg_for_device
= swiotlb_sync_sg_for_device
,
63 .map_sg
= swiotlb_map_sg_attrs
,
64 .unmap_sg
= swiotlb_unmap_sg_attrs
,
65 .map_page
= swiotlb_map_page
,
66 .unmap_page
= swiotlb_unmap_page
,
67 .dma_supported
= NULL
,
70 void __init
pci_swiotlb_init(void)
72 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
74 if ((!iommu_detected
&& !no_iommu
&& max_pfn
> MAX_DMA32_PFN
) ||
81 printk(KERN_INFO
"PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
83 dma_ops
= &swiotlb_dma_ops
;