// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#define DIRECT_MAPPING_ERROR	0
/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif
/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}
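
/*
 * Check that memory at @phys is reachable through the device's coherent
 * DMA mask, using the unencrypted bus address when SEV forces DMA to
 * unencrypted memory.
 */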
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t addr = force_dma_unencrypted() ?
		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
	return addr + size - 1 <= dev->coherent_dma_mask;
}
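
/*
 * Allocate zeroed memory for coherent DMA: prefer CMA when the caller may
 * sleep, fall back to the page allocator, and retry with progressively
 * more restrictive GFP zones (DMA32, then DMA) until the pages satisfy
 * the device's coherent DMA mask.
 */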
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	void *ret;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		/* retry the allocation from a more restrictive zone: */
		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;

	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}
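
/*
 * For the direct mapping the bus address of a page is just its physical
 * address run through phys_to_dma(), plus the offset into the page.
 */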
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}
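
/*
 * Map each scatterlist segment in place; give up and map nothing (return
 * 0) as soon as one segment falls outside the device's addressable range.
 */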
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}
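
/*
 * Report whether a DMA mask can be satisfied by the direct mapping,
 * i.e. without an IOMMU or bounce buffering.
 */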
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif
	/*
	 * Various PCI/PCIe bridges have broken support for > 32bit DMA even
	 * if the device itself might support it.
	 */
	if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
		return 0;
	return 1;
}
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);