# SPDX-License-Identifier: GPL-2.0-only

# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
# mapping fast path should select this option and set the dma_ops_bypass
# flag in struct device where applicable
config DMA_OPS_BYPASS
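
# A minimal, illustrative sketch (not part of this file) of the driver-side
# counterpart: an IOMMU driver that selects this option might, in its
# per-device setup, set the flag once it has verified that the device can
# reach all memory through the direct mapping (the condition shown is
# hypothetical):
#
#	if (device_can_use_direct_mapping)	/* hypothetical check */
#		dev->dma_ops_bypass = true;	/* take the dma-direct fast path */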

# Lets the platform IOMMU driver choose between bypass and IOMMU
config ARCH_HAS_DMA_MAP_DIRECT

config NEED_SG_DMA_LENGTH

config NEED_DMA_MAP_STATE

config ARCH_DMA_ADDR_T_64BIT
	def_bool 64BIT || PHYS_ADDR_T_64BIT

config ARCH_HAS_DMA_COHERENCE_H

config ARCH_HAS_DMA_SET_MASK

# Select this option if the architecture needs special handling for
# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
# people think of when saying write combine, so very few platforms should
# need to enable this.
config ARCH_HAS_DMA_WRITE_COMBINE
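
# For reference, a hedged sketch of the driver-side usage this option covers:
# a driver requests a write-combined buffer with dma_alloc_attrs(), and only
# architectures selecting this option apply special handling for the
# attribute ("dev" and "size" here are illustrative):
#
#	void *cpu_addr;
#	dma_addr_t dma_handle;
#
#	cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
#				   DMA_ATTR_WRITE_COMBINE);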

# Select if the architecture provides the arch_dma_mark_clean hook
config ARCH_HAS_DMA_MARK_CLEAN

config DMA_DECLARE_COHERENT

config ARCH_HAS_SETUP_DMA_OPS

config ARCH_HAS_TEARDOWN_DMA_OPS

config ARCH_HAS_SYNC_DMA_FOR_DEVICE

config ARCH_HAS_SYNC_DMA_FOR_CPU
	select NEED_DMA_MAP_STATE

config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL

config ARCH_HAS_DMA_PREP_COHERENT

config ARCH_HAS_FORCE_DMA_UNENCRYPTED

config SWIOTLB
	select NEED_DMA_MAP_STATE

# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the page tables.
config DMA_NONCOHERENT_MMAP

config DMA_COHERENT_POOL
	select GENERIC_ALLOCATOR

config DMA_REMAP
	select DMA_NONCOHERENT_MMAP

config DMA_DIRECT_REMAP
	select DMA_COHERENT_POOL
110 bool "DMA Contiguous Memory Allocator"
111 depends on HAVE_DMA_CONTIGUOUS && CMA
113 This enables the Contiguous Memory Allocator which allows drivers
114 to allocate big physically-contiguous blocks of memory for use with
115 hardware components that do not support I/O map nor scatter-gather.
117 You can disable CMA by specifying "cma=0" on the kernel's command
120 For more information see <kernel/dma/contiguous.c>.
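
# A hedged, illustrative sketch of what DMA_CMA buys a driver (names are not
# from this file): with CMA available, a large coherent allocation can be
# satisfied from the contiguous area instead of failing when the page
# allocator cannot find enough physically-contiguous memory:
#
#	dma_addr_t dma_handle;
#	void *buf;
#
#	buf = dma_alloc_coherent(dev, SZ_8M, &dma_handle, GFP_KERNEL);
#	if (!buf)
#		return -ENOMEM;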

config DMA_PERNUMA_CMA
	bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
	default NUMA && ARM64
	help
	  Enable this option to get per-NUMA CMA areas so that devices like
	  the ARM64 SMMU can get local memory via the DMA coherent APIs.

	  You can set the size of the per-NUMA CMA by specifying
	  "cma_pernuma=size" on the kernel's command line.
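
# For example (an illustrative value, not a recommendation), booting with
# "cma_pernuma=16M" reserves a 16 MiB CMA area on each NUMA node.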
135 comment "Default contiguous memory area size:"
137 config CMA_SIZE_MBYTES
138 int "Size in Mega Bytes"
139 depends on !CMA_SIZE_SEL_PERCENTAGE
143 Defines the size (in MiB) of the default memory area for Contiguous
144 Memory Allocator. If the size of 0 is selected, CMA is disabled by
145 default, but it can be enabled by passing cma=size[MG] to the kernel.

config CMA_SIZE_PERCENTAGE
	int "Percentage of total memory"
	depends on !CMA_SIZE_SEL_MBYTES
	help
	  Defines the size of the default memory area for the Contiguous
	  Memory Allocator as a percentage of the total memory in the system.
	  If 0 percent is selected, CMA is disabled by default, but it can be
	  enabled by passing cma=size[MG] to the kernel.
160 prompt "Selected region size"
161 default CMA_SIZE_SEL_MBYTES
163 config CMA_SIZE_SEL_MBYTES
164 bool "Use mega bytes value only"
166 config CMA_SIZE_SEL_PERCENTAGE
167 bool "Use percentage value only"
169 config CMA_SIZE_SEL_MIN
170 bool "Use lower value (minimum)"
172 config CMA_SIZE_SEL_MAX
173 bool "Use higher value (maximum)"
178 int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
182 DMA mapping framework by default aligns all buffers to the smallest
183 PAGE_SIZE order which is greater than or equal to the requested buffer
184 size. This works well for buffers up to a few hundreds kilobytes, but
185 for larger buffers it just a memory waste. With this parameter you can
186 specify the maximum PAGE_SIZE order for contiguous buffers. Larger
187 buffers will be aligned only to this specified order. The order is
188 expressed as a power of two multiplied by the PAGE_SIZE.
190 For example, if your system defaults to 4KiB pages, the order value
191 of 8 means that the buffers will be aligned up to 1MiB only.
193 If unsure, leave the default value "8".
198 bool "Enable debugging of DMA-API usage"
199 select NEED_DMA_MAP_STATE
201 Enable this option to debug the use of the DMA API by device drivers.
202 With this option you will be able to detect common bugs in device
203 drivers like double-freeing of DMA mappings or freeing mappings that
204 were never allocated.
206 This option causes a performance degradation. Use only if you want to
207 debug device drivers and dma interactions.
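
# A hedged sketch of the kind of driver bug this option reports (illustrative
# code, not from this file): unmapping the same streaming mapping twice, which
# dma-debug flags at the second dma_unmap_single() call:
#
#	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
#
#	if (dma_mapping_error(dev, addr))
#		return -ENOMEM;
#	/* ... hardware consumes the buffer ... */
#	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
#	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);	/* bug: double unmap */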

config DMA_API_DEBUG_SG
	bool "Debug DMA scatter-gather usage"
	depends on DMA_API_DEBUG
	help
	  Perform extra checking that callers of dma_map_sg() have respected
	  the appropriate segment length/boundary limits for the given device
	  when preparing DMA scatterlists.

	  This is particularly likely to have been overlooked in cases where
	  the dma_map_sg() API is used for general bulk mapping of pages
	  rather than preparing literal scatter-gather descriptors, where
	  there is a risk of unexpected behaviour from DMA API implementations
	  if the scatterlist is technically out-of-spec.
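
# A hedged sketch of the limits being checked (values are illustrative): the
# driver declares its device's segment limits, and this option then verifies
# that scatterlists passed to dma_map_sg() actually respect them:
#
#	dma_set_max_seg_size(dev, SZ_64K);	/* no segment longer than 64 KiB */
#	dma_set_seg_boundary(dev, SZ_4K - 1);	/* no segment crosses a 4 KiB boundary */
#
#	nents = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);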

config DMA_MAP_BENCHMARK
	bool "Enable benchmarking of streaming DMA mapping"
	help
	  Provides /sys/kernel/debug/dma_map_benchmark that helps with testing
	  performance of dma_(un)map_page.

	  See tools/testing/selftests/dma/dma_map_benchmark.c