/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>

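/*
 * Prototypes for the parts of the direct mapping path that live out of
 * line in direct.c.
 */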
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

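/*
 * The scatterlist sync and unmap helpers below only carry real work when
 * an architecture needs explicit cache maintenance or swiotlb bounce
 * buffering may be involved; in all other configurations they compile
 * away to empty inline stubs.
 */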
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

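/*
 * Make a single streaming mapping visible to the device: if the mapping
 * is backed by a swiotlb bounce buffer, copy the CPU's data into the
 * bounce slot first, then perform the architecture's cache maintenance
 * on non-coherent platforms.
 */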
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

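/*
 * Give the CPU a coherent view after device DMA: perform cache
 * maintenance on non-coherent platforms, copy data back out of any
 * swiotlb bounce buffer, and, on architectures that implement it, mark
 * pages clean after DMA_FROM_DEVICE transfers.
 */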
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

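/*
 * Map a page for streaming DMA. The fast path is a plain phys_to_dma()
 * translation; swiotlb bounce buffering is used when it is forced
 * globally, or when the translated address does not fit the device's
 * DMA mask and bus limit. If the address is unreachable and bouncing is
 * disabled, the mapping fails with DMA_MAPPING_ERROR.
 */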
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

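/*
 * Tear down a mapping created by dma_direct_map_page(): sync the buffer
 * back for the CPU unless the caller opted out with
 * DMA_ATTR_SKIP_CPU_SYNC, then release any swiotlb bounce slot backing
 * the mapping.
 */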
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}

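/*
 * Illustrative sketch only, not part of this header: a caller such as
 * the generic code in mapping.c pairs these helpers roughly as below,
 * where "page", "size" and "dir" stand in for real caller state:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_direct_map_page(dev, page, 0, size, dir, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return dma;
 *	... device performs DMA ...
 *	dma_direct_unmap_page(dev, dma, size, dir, 0);
 */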
#endif /* _KERNEL_DMA_DIRECT_H */