1 /* Glue code to lib/swiotlb.c */
4 #include <linux/cache.h>
5 #include <linux/module.h>
6 #include <linux/swiotlb.h>
7 #include <linux/bootmem.h>
8 #include <linux/dma-mapping.h>
10 #include <asm/iommu.h>
11 #include <asm/swiotlb.h>
/*
 * Non-zero when the software IOTLB (bounce buffering) path is selected.
 * NOTE(review): presumably set during pci_swiotlb_init() below and read
 * on hot DMA-mapping paths, hence __read_mostly — confirm against the
 * full file.
 */
int swiotlb __read_mostly
;
16 void *swiotlb_alloc_boot(size_t size
, unsigned long nslabs
)
18 return alloc_bootmem_low_pages(size
);
21 void *swiotlb_alloc(unsigned order
, unsigned long nslabs
)
23 return (void *)__get_free_pages(GFP_DMA
| __GFP_NOWARN
, order
);
26 dma_addr_t
swiotlb_phys_to_bus(phys_addr_t paddr
)
31 phys_addr_t
swiotlb_bus_to_phys(dma_addr_t baddr
)
37 swiotlb_map_single_phys(struct device
*hwdev
, phys_addr_t paddr
, size_t size
,
40 return swiotlb_map_single(hwdev
, phys_to_virt(paddr
), size
, direction
);
43 static void *x86_swiotlb_alloc_coherent(struct device
*hwdev
, size_t size
,
44 dma_addr_t
*dma_handle
, gfp_t flags
)
48 vaddr
= dma_generic_alloc_coherent(hwdev
, size
, dma_handle
, flags
);
52 return swiotlb_alloc_coherent(hwdev
, size
, dma_handle
, flags
);
55 struct dma_mapping_ops swiotlb_dma_ops
= {
56 .mapping_error
= swiotlb_dma_mapping_error
,
57 .alloc_coherent
= x86_swiotlb_alloc_coherent
,
58 .free_coherent
= swiotlb_free_coherent
,
59 .map_single
= swiotlb_map_single_phys
,
60 .unmap_single
= swiotlb_unmap_single
,
61 .sync_single_for_cpu
= swiotlb_sync_single_for_cpu
,
62 .sync_single_for_device
= swiotlb_sync_single_for_device
,
63 .sync_single_range_for_cpu
= swiotlb_sync_single_range_for_cpu
,
64 .sync_single_range_for_device
= swiotlb_sync_single_range_for_device
,
65 .sync_sg_for_cpu
= swiotlb_sync_sg_for_cpu
,
66 .sync_sg_for_device
= swiotlb_sync_sg_for_device
,
67 .map_sg
= swiotlb_map_sg
,
68 .unmap_sg
= swiotlb_unmap_sg
,
69 .dma_supported
= NULL
,
72 void __init
pci_swiotlb_init(void)
74 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
76 if (!iommu_detected
&& !no_iommu
&& max_pfn
> MAX_DMA32_PFN
)
82 printk(KERN_INFO
"PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
84 dma_ops
= &swiotlb_dma_ops
;