// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>

#define DIRECT_MAPPING_ERROR	0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif
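
/*
 * Check that a mapping of @size bytes at @dma_addr is reachable through
 * the device's DMA mask.  The overflow is only logged for masks of at
 * least 32 bits, where it indicates a driver bug rather than an expected
 * addressing limitation.
 */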
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}
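
/*
 * Check whether a coherent buffer at @phys spanning @size bytes lies
 * entirely below the device's coherent DMA mask.
 */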
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
}
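
/*
 * Allocate a zeroed coherent buffer: try CMA first when the context
 * allows sleeping, then fall back to the page allocator, retrying with
 * a more restrictive GFP zone whenever the pages we got back are not
 * addressable under the coherent DMA mask.
 */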
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(page_address(page), 0, size);
	return page_address(page);
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, get_order(size));
}
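
/*
 * Map a single page for streaming DMA.  With the direct mapping the bus
 * address is just the physical address, so the only possible failure is
 * an address beyond the device's DMA mask.
 */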
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}
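
/*
 * Map a scatterlist entry by entry.  No segments are merged; each entry
 * keeps its own address and length.  Returns 0 (failure) as soon as one
 * entry does not fit the device's DMA mask.
 */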
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}
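
/*
 * Report whether a DMA mask can be satisfied by the direct mapping:
 * reject masks that are smaller than what the available memory zones
 * can guarantee to serve.
 */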
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif
	return 1;
}
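
/*
 * Bus address 0 doubles as the error cookie (DIRECT_MAPPING_ERROR), so a
 * mapping that ends up at physical address 0 cannot be distinguished
 * from a failure.
 */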
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}
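
/*
 * The ops table wiring the direct implementations into the generic DMA
 * API.  This file is typically built when an architecture selects
 * CONFIG_DMA_DIRECT_OPS; no unmap callbacks are provided because no
 * per-mapping state is ever set up.
 */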
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.is_phys		= 1,
};
EXPORT_SYMBOL(dma_direct_ops);