// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without providing cache
 * coherence.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/scatterlist.h>
14 static void dma_noncoherent_sync_single_for_device(struct device
*dev
,
15 dma_addr_t addr
, size_t size
, enum dma_data_direction dir
)
17 arch_sync_dma_for_device(dev
, dma_to_phys(dev
, addr
), size
, dir
);
20 static void dma_noncoherent_sync_sg_for_device(struct device
*dev
,
21 struct scatterlist
*sgl
, int nents
, enum dma_data_direction dir
)
23 struct scatterlist
*sg
;
26 for_each_sg(sgl
, sg
, nents
, i
)
27 arch_sync_dma_for_device(dev
, sg_phys(sg
), sg
->length
, dir
);
30 static dma_addr_t
dma_noncoherent_map_page(struct device
*dev
, struct page
*page
,
31 unsigned long offset
, size_t size
, enum dma_data_direction dir
,
36 addr
= dma_direct_map_page(dev
, page
, offset
, size
, dir
, attrs
);
37 if (!dma_mapping_error(dev
, addr
) && !(attrs
& DMA_ATTR_SKIP_CPU_SYNC
))
38 arch_sync_dma_for_device(dev
, page_to_phys(page
) + offset
,
43 static int dma_noncoherent_map_sg(struct device
*dev
, struct scatterlist
*sgl
,
44 int nents
, enum dma_data_direction dir
, unsigned long attrs
)
46 nents
= dma_direct_map_sg(dev
, sgl
, nents
, dir
, attrs
);
47 if (nents
> 0 && !(attrs
& DMA_ATTR_SKIP_CPU_SYNC
))
48 dma_noncoherent_sync_sg_for_device(dev
, sgl
, nents
, dir
);
52 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
53 static void dma_noncoherent_sync_single_for_cpu(struct device
*dev
,
54 dma_addr_t addr
, size_t size
, enum dma_data_direction dir
)
56 arch_sync_dma_for_cpu(dev
, dma_to_phys(dev
, addr
), size
, dir
);
59 static void dma_noncoherent_sync_sg_for_cpu(struct device
*dev
,
60 struct scatterlist
*sgl
, int nents
, enum dma_data_direction dir
)
62 struct scatterlist
*sg
;
65 for_each_sg(sgl
, sg
, nents
, i
)
66 arch_sync_dma_for_cpu(dev
, sg_phys(sg
), sg
->length
, dir
);
69 static void dma_noncoherent_unmap_page(struct device
*dev
, dma_addr_t addr
,
70 size_t size
, enum dma_data_direction dir
, unsigned long attrs
)
72 if (!(attrs
& DMA_ATTR_SKIP_CPU_SYNC
))
73 dma_noncoherent_sync_single_for_cpu(dev
, addr
, size
, dir
);
76 static void dma_noncoherent_unmap_sg(struct device
*dev
, struct scatterlist
*sgl
,
77 int nents
, enum dma_data_direction dir
, unsigned long attrs
)
79 if (!(attrs
& DMA_ATTR_SKIP_CPU_SYNC
))
80 dma_noncoherent_sync_sg_for_cpu(dev
, sgl
, nents
, dir
);
84 const struct dma_map_ops dma_noncoherent_ops
= {
85 .alloc
= arch_dma_alloc
,
86 .free
= arch_dma_free
,
87 .mmap
= arch_dma_mmap
,
88 .sync_single_for_device
= dma_noncoherent_sync_single_for_device
,
89 .sync_sg_for_device
= dma_noncoherent_sync_sg_for_device
,
90 .map_page
= dma_noncoherent_map_page
,
91 .map_sg
= dma_noncoherent_map_sg
,
92 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
93 .sync_single_for_cpu
= dma_noncoherent_sync_single_for_cpu
,
94 .sync_sg_for_cpu
= dma_noncoherent_sync_sg_for_cpu
,
95 .unmap_page
= dma_noncoherent_unmap_page
,
96 .unmap_sg
= dma_noncoherent_unmap_sg
,
98 .dma_supported
= dma_direct_supported
,
99 .mapping_error
= dma_direct_mapping_error
,
100 .cache_sync
= arch_dma_cache_sync
,
102 EXPORT_SYMBOL(dma_noncoherent_ops
);