// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
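/*
 * One atomic pool is kept per applicable zone: atomic_pool_dma backs GFP_DMA
 * requests, atomic_pool_dma32 backs GFP_DMA32, and atomic_pool_kernel backs
 * everything else.  The pool_size_* counters track how much memory each pool
 * currently holds and are exported via debugfs below.
 */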
static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;
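/* Parse the size given with the "coherent_pool=" early command-line option. */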
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
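/* Expose the per-pool size counters under <debugfs>/dma_pools/. */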
static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	if (IS_ERR_OR_NULL(root))
		return;

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}
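/* Account newly added memory to the pool matching the gfp zone flags. */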
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}
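/*
 * Grow @pool by roughly @pool_size bytes: allocate pages from the zone
 * selected by @gfp (falling back to smaller orders if necessary), make them
 * coherent and unencrypted, remap them if required, and hand the region to
 * the genpool allocator.
 */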
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER-1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
	 * shrink so no re-encryption occurs in dma_direct_free_pages().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}
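/* Expand @pool if its available space has dropped below the target size. */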
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}
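/*
 * Deferred work scheduled from dma_alloc_from_pool(): resize every pool that
 * exists for the current configuration so atomic allocations keep finding
 * memory.
 */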
static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}
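/*
 * Create a gen_pool and fill it with an initial allocation of @pool_size
 * bytes from the zone selected by @gfp.  Returns NULL if either step fails.
 */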
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}
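/*
 * Early (postcore) initialisation: size the pools from the command line or
 * from total RAM, then create one pool per configured zone.
 */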
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);
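/*
 * Pick the pool that matches the zone a device would normally allocate from,
 * based on its coherent DMA mask.
 */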
static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
{
	u64 phys_mask;
	gfp_t gfp;

	gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					  &phys_mask);
	if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
		return atomic_pool_dma;
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
		return atomic_pool_dma32;
	return atomic_pool_kernel;
}
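/*
 * When the guessed pool could not satisfy the device's addressing
 * constraints, fall back to a more restrictive one: kernel -> DMA32 (or DMA),
 * DMA32 -> DMA, and give up after the DMA pool.
 */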
static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
{
	if (bad_pool == atomic_pool_kernel)
		return atomic_pool_dma32 ? : atomic_pool_dma;

	if (bad_pool == atomic_pool_dma32)
		return atomic_pool_dma;

	return NULL;
}
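/*
 * Choose the next pool to try: start from the device's preferred pool and
 * move to a safer one each time the previous attempt (@bad_pool) failed.
 */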
static inline struct gen_pool *dma_guess_pool(struct device *dev,
					      struct gen_pool *bad_pool)
{
	if (bad_pool)
		return dma_get_safer_pool(bad_pool);

	return dma_guess_pool_from_device(dev);
}
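/*
 * Allocate @size bytes of zeroed, DMA-coherent pool memory for @dev and
 * return its kernel virtual address, storing the backing page in @ret_page.
 * Pools are tried from the device's preferred one towards safer zones until
 * an address the device can reach is found; NULL is returned if no pool can
 * satisfy the request.  Background expansion is scheduled when the chosen
 * pool runs low.
 *
 * Illustrative only (a minimal sketch, not code from this file): a
 * dma-direct style caller that cannot sleep might fall back to the pool
 * roughly like this:
 *
 *	if (!gfpflags_allow_blocking(gfp)) {
 *		struct page *page;
 *		void *ret = dma_alloc_from_pool(dev, size, &page, gfp);
 *		if (!ret)
 *			return NULL;
 *		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 *		return ret;
 *	}
 */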
void *dma_alloc_from_pool(struct device *dev, size_t size,
			  struct page **ret_page, gfp_t flags)
{
	struct gen_pool *pool = NULL;
	unsigned long val = 0;
	void *ptr = NULL;
	phys_addr_t phys;

	while (1) {
		pool = dma_guess_pool(dev, pool);
		if (!pool) {
			WARN(1, "Failed to get suitable pool for %s\n",
			     dev_name(dev));
			break;
		}

		val = gen_pool_alloc(pool, size);
		if (!val)
			continue;

		phys = gen_pool_virt_to_phys(pool, val);
		if (dma_coherent_ok(dev, phys, size))
			break;

		gen_pool_free(pool, val, size);
		val = 0;
	}

	if (val) {
		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);

		if (gen_pool_avail(pool) < atomic_pool_size)
			schedule_work(&atomic_pool_work);
	}

	return ptr;
}
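/*
 * Return @size bytes at @start, previously handed out by
 * dma_alloc_from_pool(), to whichever pool they belong to.  Returns false if
 * the address does not come from any atomic pool.
 */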
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while (1) {
		pool = dma_guess_pool(dev, pool);
		if (!pool)
			return false;

		if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
			gen_pool_free(pool, (unsigned long)start, size);
			return true;
		}
	}
}