// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
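
/*
 * Illustrative sketch, not part of this file: an architecture whose DMA zone
 * covers something other than the low 16 MiB would override the default above
 * from its early init code, before the first DMA allocation. The function
 * name and the 30-bit limit below are hypothetical:
 *
 *	void __init example_arch_dma_init(void)
 *	{
 *		// assume this arch's ZONE_DMA spans the low 1 GiB
 *		zone_dma_bits = 30;
 *	}
 */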
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
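
/*
 * Worked example (illustrative): on a machine whose highest RAM page sits
 * just below 4 GiB and with no addressing offset, phys is about 0xfffff000,
 * so fls64(max_dma) is 32 and the function reports a required mask of
 * (1ULL << 31) * 2 - 1 == 0xffffffff, i.e. DMA_BIT_MASK(32).
 */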
gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
				  u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	if (force_dma_unencrypted(dev))
		*phys_limit = __dma_to_phys(dev, dma_limit);
	else
		*phys_limit = dma_to_phys(dev, dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
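
/*
 * Worked example (illustrative): with the default zone_dma_bits of 24, no
 * bus_dma_limit and no memory encryption, a device whose mask is
 * DMA_BIT_MASK(30) gets a *phys_limit of 0x3fffffff; that is above
 * DMA_BIT_MASK(24) but within DMA_BIT_MASK(32), so the allocation is first
 * attempted with GFP_DMA32.
 */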
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
/*
 * Decrypting memory is allowed to block, so if this device requires
 * unencrypted memory it must come from atomic pools.
 */
static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
					      unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return false;
	if (gfpflags_allow_blocking(gfp))
		return false;
	if (force_dma_unencrypted(dev))
		return true;
	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return false;
	if (dma_alloc_need_uncached(dev, attrs))
		return true;
	return false;
}

static inline bool dma_should_free_from_pool(struct device *dev,
					     unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return true;
	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev))
		return false;
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return true;
	return false;
}
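
/*
 * Illustrative sketch, not part of this file: a driver allocating coherent
 * memory from atomic context, e.g.
 *
 *	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_ATOMIC);
 *
 * cannot block to decrypt or remap pages, so on a device that needs
 * unencrypted memory (or an uncached alias with CONFIG_DMA_DIRECT_REMAP)
 * such a request is roughly expected to be satisfied from the pre-prepared
 * atomic pools via dma_alloc_from_pool() rather than by a fresh page
 * allocation.
 */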
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, unsigned long attrs)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;
	int err;

	size = PAGE_ALIGN(size);

	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
		ret = dma_alloc_from_pool(dev, size, &page, gfp);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		/* return the page pointer as the opaque cookie */
		ret = page;
		goto done;
	}

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     dma_alloc_need_uncached(dev, attrs)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
		if (force_dma_unencrypted(dev)) {
			err = set_memory_decrypted((unsigned long)ret,
						   1 << get_order(size));
			if (err)
				goto out_free_pages;
		}
		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		err = set_memory_decrypted((unsigned long)ret,
					   1 << get_order(size));
		if (err)
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}
done:
	if (force_dma_unencrypted(dev))
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	else
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (force_dma_unencrypted(dev)) {
		err = set_memory_encrypted((unsigned long)page_address(page),
					   1 << get_order(size));
		/* If memory cannot be re-encrypted, it must be leaked */
		if (err)
			return NULL;
	}
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (dma_should_free_from_pool(dev, attrs) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cpu_addr, size);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
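
/*
 * Illustrative sketch, not part of this file: when a device has no IOMMU and
 * no custom dma_map_ops, the generic DMA API ends up in dma_direct_alloc()
 * and dma_direct_free() above. A (hypothetical) driver only ever uses the
 * generic wrappers from <linux/dma-mapping.h>:
 *
 *	static int example_setup_ring(struct device *dev, size_t ring_bytes,
 *				      void **ring, dma_addr_t *ring_dma)
 *	{
 *		// zeroed, coherent buffer; dma-direct picks GFP_DMA/GFP_DMA32
 *		// internally based on the device's coherent_dma_mask
 *		*ring = dma_alloc_coherent(dev, ring_bytes, ring_dma, GFP_KERNEL);
 *		if (!*ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void example_teardown_ring(struct device *dev, size_t ring_bytes,
 *					  void *ring, dma_addr_t ring_dma)
 *	{
 *		dma_free_coherent(dev, ring_bytes, ring, ring_dma);
 *	}
 */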
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
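
/*
 * Illustrative sketch, not part of this file: the sync helpers above back the
 * generic dma_sync_*() calls that drivers place around CPU accesses to a
 * long-lived streaming mapping. Hypothetical example for a DMA_FROM_DEVICE
 * receive buffer:
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, buf_len, DMA_FROM_DEVICE);
 *	// ... CPU reads the data the device just wrote ...
 *	dma_sync_single_for_device(dev, buf_dma, buf_len, DMA_FROM_DEVICE);
 *	// ... hand the buffer back to the device ...
 */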
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);
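
/*
 * Illustrative sketch, not part of this file: dma_direct_map_page() sits
 * behind dma_map_page()/dma_map_single() for devices using the direct
 * mapping. A (hypothetical) transmit path looks roughly like:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;	// e.g. address overflow or bounce failure
 *	// ... point the hardware descriptor at "addr" and kick the device ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */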
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);
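
/*
 * Illustrative sketch, not part of this file: scatter-gather users go through
 * dma_map_sg()/dma_unmap_sg(), which land in the two functions above for the
 * direct mapping. Hypothetical example:
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -EIO;	// dma_direct_map_sg() returns 0 on failure
 *	// ... program "count" hardware descriptors from the mapped list ...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */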
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}
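
/*
 * Illustrative sketch, not part of this file: dma_direct_mmap() backs
 * dma_mmap_coherent() for the direct mapping, e.g. from a (hypothetical)
 * driver's ->mmap() file operation; "example_dev" and its fields are made up:
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct example_dev *ed = file->private_data;
 *
 *		return dma_mmap_coherent(ed->dev, vma, ed->ring,
 *					 ed->ring_dma, ed->ring_bytes);
 *	}
 */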
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= __phys_to_dma(dev, min_mask);
}
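
/*
 * Worked example (illustrative): with zone_dma_bits at its default of 24, a
 * request for DMA_BIT_MASK(24) on a machine with RAM above 16 MiB clamps
 * min_mask to 0xffffff, so the mask is accepted as long as
 * __phys_to_dma(dev, 0xffffff) still fits within 24 bits; a smaller mask, or
 * one inflated past that by an addressing offset, is rejected.
 */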
size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}