// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */

#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);
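
/*
 * Example: booting with "coherent_pool=256K" on the kernel command line sets
 * the initial size (and low-water mark) of each atomic pool to 256 KiB;
 * memparse() accepts the usual K/M/G suffixes.
 */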

static void __init dma_atomic_pool_debugfs_init(void)
{
        struct dentry *root;

        root = debugfs_create_dir("dma_pools", NULL);
        debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
        debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
        debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}
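
/*
 * With debugfs mounted at its usual location, the counters above are visible
 * as /sys/kernel/debug/dma_pools/pool_size_{dma,dma32,kernel}, each reporting
 * the current size of the corresponding pool in bytes (mode 0400).
 */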

static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
        if (gfp & __GFP_DMA)
                pool_size_dma += size;
        else if (gfp & __GFP_DMA32)
                pool_size_dma32 += size;
        else
                pool_size_kernel += size;
}

static bool cma_in_zone(gfp_t gfp)
{
        unsigned long size;
        phys_addr_t end;
        struct cma *cma;

        cma = dev_get_cma_area(NULL);
        if (!cma)
                return false;

        size = cma_get_size(cma);
        if (!size)
                return false;

        /* CMA can't cross zone boundaries, see cma_activate_area() */
        end = cma_get_base(cma) + size - 1;
        if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
                return end <= DMA_BIT_MASK(zone_dma_bits);
        if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
                return end <= DMA_BIT_MASK(32);
        return true;
}
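
/*
 * atomic_pool_expand() below only tries the CMA area when cma_in_zone() says
 * the whole area lies within the zone implied by @gfp; otherwise it falls back
 * to the buddy allocator so the pool memory still honours GFP_DMA/GFP_DMA32.
 */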

static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
                              gfp_t gfp)
{
        unsigned int order;
        struct page *page = NULL;
        void *addr;
        int ret = -ENOMEM;

        /* Cannot allocate larger than MAX_ORDER-1 */
        order = min(get_order(pool_size), MAX_ORDER-1);

        do {
                pool_size = 1 << (PAGE_SHIFT + order);
                if (cma_in_zone(gfp))
                        page = dma_alloc_from_contiguous(NULL, 1 << order,
                                                         order, false);
                if (!page)
                        page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
                goto out;

        arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
        addr = dma_common_contiguous_remap(page, pool_size,
                                           pgprot_dmacoherent(PAGE_KERNEL),
                                           __builtin_return_address(0));
        if (!addr)
                goto free_page;
#else
        addr = page_to_virt(page);
#endif
        /*
         * Memory in the atomic DMA pools must be unencrypted, the pools do not
         * shrink so no re-encryption occurs in dma_direct_free().
         */
        ret = set_memory_decrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (ret)
                goto remove_mapping;
        ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
                                pool_size, NUMA_NO_NODE);
        if (ret)
                goto encrypt_mapping;

        dma_atomic_pool_size_add(gfp, pool_size);
        return 0;

encrypt_mapping:
        ret = set_memory_encrypted((unsigned long)page_to_virt(page),
                                   1 << order);
        if (WARN_ON_ONCE(ret)) {
                /* Decrypt succeeded but encrypt failed, purposely leak */
                goto out;
        }
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
        dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
        __free_pages(page, order);
out:
        return ret;
}

static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
        if (pool && gen_pool_avail(pool) < atomic_pool_size)
                atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                atomic_pool_resize(atomic_pool_dma,
                                   GFP_KERNEL | GFP_DMA);
        if (IS_ENABLED(CONFIG_ZONE_DMA32))
                atomic_pool_resize(atomic_pool_dma32,
                                   GFP_KERNEL | GFP_DMA32);
        atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
                                                      gfp_t gfp)
{
        struct gen_pool *pool;
        int ret;

        pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
        if (!pool)
                return NULL;

        gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

        ret = atomic_pool_expand(pool, pool_size, gfp);
        if (ret) {
                gen_pool_destroy(pool);
                pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
                       pool_size >> 10, &gfp);
                return NULL;
        }

        pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
                gen_pool_size(pool) >> 10, &gfp);
        return pool;
}

static int __init dma_atomic_pool_init(void)
{
        int ret = 0;

        /*
         * If coherent_pool was not used on the command line, default the pool
         * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
         */
        if (!atomic_pool_size) {
                unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
                pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
                atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
        }
        INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

        atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
                                                    GFP_KERNEL);
        if (!atomic_pool_kernel)
                ret = -ENOMEM;
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
                atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA);
                if (!atomic_pool_dma)
                        ret = -ENOMEM;
        }
        if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
                atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
                                                GFP_KERNEL | GFP_DMA32);
                if (!atomic_pool_dma32)
                        ret = -ENOMEM;
        }

        dma_atomic_pool_debugfs_init();
        return ret;
}
postcore_initcall(dma_atomic_pool_init);
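
/*
 * Pick the next pool to try for an allocation: the first call (prev == NULL)
 * returns the pool matching the zone implied by @gfp; subsequent calls, passed
 * the previous guess, fall back to pools covering lower zones (whose memory
 * also satisfies the original mask) until no pool is left.
 */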
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
        if (prev == NULL) {
                if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
                        return atomic_pool_dma32;
                if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
                        return atomic_pool_dma;
                return atomic_pool_kernel;
        }
        if (prev == atomic_pool_kernel)
                return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
        if (prev == atomic_pool_dma32)
                return atomic_pool_dma;
        return NULL;
}
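
/*
 * Allocate from one specific pool: reserve @size bytes, check the backing
 * physical address against the device's constraints via @phys_addr_ok
 * (releasing the allocation again on mismatch), and schedule the background
 * expansion worker once the pool's free space drops below atomic_pool_size.
 */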
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
                struct gen_pool *pool, void **cpu_addr,
                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
        unsigned long addr;
        phys_addr_t phys;

        addr = gen_pool_alloc(pool, size);
        if (!addr)
                return NULL;

        phys = gen_pool_virt_to_phys(pool, addr);
        if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
                gen_pool_free(pool, addr, size);
                return NULL;
        }

        if (gen_pool_avail(pool) < atomic_pool_size)
                schedule_work(&atomic_pool_work);

        *cpu_addr = (void *)addr;
        memset(*cpu_addr, 0, size);
        return pfn_to_page(__phys_to_pfn(phys));
}

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
                void **cpu_addr, gfp_t gfp,
                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
        struct gen_pool *pool = NULL;
        struct page *page;

        while ((pool = dma_guess_pool(pool, gfp))) {
                page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
                                             phys_addr_ok);
                if (page)
                        return page;
        }

        WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
        return NULL;
}
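
/*
 * Minimal caller sketch (hypothetical, loosely modelled on how dma-direct uses
 * this helper; dma_coherent_ok() and phys_to_dma() live in the dma-direct
 * code, not in this file):
 *
 *      void *cpu_addr;
 *      struct page *page;
 *
 *      page = dma_alloc_from_pool(dev, size, &cpu_addr, GFP_ATOMIC,
 *                                 dma_coherent_ok);
 *      if (!page)
 *              return NULL;
 *      *dma_handle = phys_to_dma(dev, page_to_phys(page));
 *      return cpu_addr;
 */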

bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
        struct gen_pool *pool = NULL;

        while ((pool = dma_guess_pool(pool, 0))) {
                if (!gen_pool_has_addr(pool, (unsigned long)start, size))
                        continue;
                gen_pool_free(pool, (unsigned long)start, size);
                return true;
        }

        return false;
}