// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);

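/*
 * Illustrative sketch only (not taken from any particular architecture):
 * arch early-init code that knows its device-addressable window ends at
 * 1 GiB could override the default above before any dma-direct allocation
 * runs. The function name below is hypothetical.
 *
 *	void __init example_arch_dma_init(void)
 *	{
 *		zone_dma_limit = DMA_BIT_MASK(30);
 *	}
 */
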
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

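/*
 * Worked example (added for illustration, values are hypothetical): with
 * physical memory reaching up to 0x2_3456_7000 and an identity phys-to-dma
 * translation, max_dma is 0x2_3456_6fff, fls64() returns 34, and the
 * computed mask is (1ULL << 33) * 2 - 1 = 0x3_ffff_ffff, i.e. the smallest
 * all-ones mask that covers the highest DMA address.
 */
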
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
			dev->coherent_dma_mask,
			dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= zone_dma_limit)
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

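/*
 * Illustration (hypothetical numbers, identity dma-to-phys translation):
 * a 28-bit coherent mask yields *phys_limit = 0x0fffffff, which is above
 * the default 24-bit zone_dma_limit but within 32 bits, so GFP_DMA32 is
 * returned; a 22-bit mask would fall below zone_dma_limit and return
 * GFP_DMA; a full 64-bit mask returns 0 and allocation starts in the
 * normal zone.
 */
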
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, get_order(size));
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise we require the architecture to either be able to
		 * mark arbitrary parts of the kernel direct mapping uncached,
		 * or remap it uncached.
		 */
		set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (!set_uncached && !remap) {
			pr_warn_once("coherent DMA allocations not supported on this platform.\n");
			return NULL;
		}
	}

	/*
	 * Remapping or decrypting memory may block, allocate the memory from
	 * the atomic pools instead if we aren't allowed to block.
	 */
	if ((remap || force_dma_unencrypted(dev)) &&
	    dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup. These need to
	 * be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

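/*
 * Hedged usage sketch (illustrative, not part of this file): a driver whose
 * device ends up on the dma-direct path reaches dma_direct_alloc() through
 * the generic dma_alloc_coherent() wrapper. "my_dev" and "vaddr" are made-up
 * names.
 *
 *	dma_addr_t bus;
 *	void *vaddr = dma_alloc_coherent(&my_dev->dev, SZ_4K, &bus, GFP_KERNEL);
 *
 *	if (vaddr) {
 *		// program the device with "bus", touch the buffer via "vaddr"
 *		dma_free_coherent(&my_dev->dev, SZ_4K, vaddr, bus);
 *	}
 */
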
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size)) {
		__dma_direct_free_pages(dev, page, size);
		return NULL;
	}
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

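/*
 * Hedged usage sketch (illustrative only): callers normally reach
 * dma_direct_alloc_pages() through the generic dma_alloc_pages() wrapper,
 * which hands back a struct page plus a device address:
 *
 *	dma_addr_t dma;
 *	struct page *p = dma_alloc_pages(dev, SZ_64K, &dma,
 *					 DMA_BIDIRECTIONAL, GFP_KERNEL);
 *
 *	if (p)
 *		dma_free_pages(dev, SZ_64K, p, dma, DMA_BIDIRECTIONAL);
 */
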
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

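/*
 * Hedged usage sketch (illustrative, names are made up): drivers normally
 * reach dma_direct_map_sg() via dma_map_sgtable() on an sg_table they have
 * already populated:
 *
 *	if (dma_map_sgtable(dev, &my_sgt, DMA_TO_DEVICE, 0))
 *		return -EIO;
 *	// ... hardware works on the mapped addresses ...
 *	dma_unmap_sgtable(dev, &my_sgt, DMA_TO_DEVICE, 0);
 */
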
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

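/*
 * Hedged usage sketch (illustrative only): a driver's .mmap file operation
 * typically reaches this helper through dma_mmap_coherent(), passing the
 * cpu/dma addresses returned by dma_alloc_coherent(). The "my_*"
 * identifiers are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_dma_addr, my_size);
 *	}
 */
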
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, zone_dma_limit);
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

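/*
 * Hedged usage sketch (illustrative only): a driver declares its addressing
 * capability with dma_set_mask_and_coherent(); for devices on the
 * dma-direct path the request is ultimately validated against
 * dma_direct_supported() above.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		dev_warn(&pdev->dev, "no suitable DMA available\n");
 */
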
/*
 * Check whether all RAM resource ranges are covered by the dma range map.
 * Returns 0 when further checking is needed, 1 if some RAM range can't be
 * covered by the dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	const struct bus_dma_region *bdr = NULL;
	const struct bus_dma_region *m;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				bdr = m;
				break;
			}
		}
		if (!bdr)
			return 1;

		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}

bool dma_direct_all_ram_mapped(struct device *dev)
{
	if (!dev->dma_range_map)
		return true;
	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
				      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			  dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
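
/*
 * Hedged usage sketch (illustrative, addresses are hypothetical): platform
 * or bus glue code - never a regular driver - that knows RAM starting at
 * 0x40000000 shows up at bus address 0x0 could register the offset like
 * this:
 *
 *	ret = dma_direct_set_offset(dev, 0x40000000, 0x0, SZ_1G);
 */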