// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif /* ARCH_ZONE_DMA_BITS */
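
/*
 * Illustrative note (an assumption, not from this file): an architecture
 * whose ZONE_DMA covers a range other than the low 16 MiB can override the
 * default above from its asm headers, e.g. a hypothetical 2 GiB DMA zone:
 *
 *	#define ARCH_ZONE_DMA_BITS 31
 */
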
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}
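
/*
 * Worked example (illustrative, not from the original source): for a device
 * with a 32-bit mask (*dev->dma_mask == 0xffffffff), mapping
 * dma_addr == 0xfffff000 with size == 0x2000 ends at
 * 0xfffff000 + 0x2000 - 1 == 0x100000fff, which exceeds the mask, so
 * check_addr() logs the overflow and returns false.
 */
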
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
}
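
/*
 * Worked example (illustrative, not from the original source): assuming an
 * identity phys_to_dma() and coherent_dma_mask == DMA_BIT_MASK(24)
 * (0xffffff), a buffer at phys == 0xf00000 with size == 0x100000 ends at
 * 0xf00000 + 0x100000 - 1 == 0xffffff and is accepted; one byte more would
 * overflow the mask and be rejected.
 */
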
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

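	/*
	 * Allocation restarts from the label below with a more restrictive
	 * GFP zone whenever the pages obtained turn out to lie above the
	 * device's coherent mask.
	 */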
again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(page_address(page), 0, size);
	return page_address(page);
}
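
/*
 * Usage sketch (hypothetical driver code, not part of this file): drivers
 * never call dma_direct_alloc() directly; it is reached through the generic
 * DMA API, which dispatches to dma_direct_ops on architectures that use it:
 *
 *	void *cpu;
 *	dma_addr_t handle;
 *
 *	cpu = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, 4096, cpu, handle);
 */
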
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, get_order(size));
}

static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}
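
/*
 * Usage sketch (hypothetical driver code, not part of this file): streaming
 * mappings reach this callback through the generic DMA API, e.g.:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */
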
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}
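
/*
 * Usage sketch (hypothetical driver code, not part of this file): callers
 * build a scatterlist and map it through the generic DMA API, e.g.:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	if (!dma_map_sg(dev, sg, 2, DMA_TO_DEVICE))
 *		return -ENOMEM;
 */
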
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif /* CONFIG_ZONE_DMA */
	return 1;
}
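
/*
 * Usage sketch (hypothetical driver code, not part of this file): this
 * callback is what decides whether a driver's mask negotiation succeeds:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
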
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);
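
/*
 * Note (an observation, not from the original source): no .unmap_page,
 * .unmap_sg, or .sync_* callbacks are provided; a direct mapping has no
 * per-buffer state to tear down, and the generic DMA API skips these
 * operations when the corresponding callback is NULL.
 */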