// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
                         size_t byte)
{
        const int nr = pool->curr_nr;
        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
        const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
        int i;

        pr_err("BUG: mempool element poison mismatch\n");
        pr_err("Mempool %p size %zu\n", pool, size);
        pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
        for (i = start; i < end; i++)
                pr_cont("%x ", *(u8 *)(element + i));
        pr_cont("%s\n", end < size ? "..." : "");
        dump_stack();
}
static void __check_element(mempool_t *pool, void *element, size_t size)
{
        u8 *obj = element;
        size_t i;

        for (i = 0; i < size; i++) {
                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp) {
                        poison_error(pool, element, size, i);
                        return;
                }
        }
        memset(obj, POISON_INUSE, size);
}
static void check_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
                __check_element(pool, element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->free == mempool_free_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}
static void __poison_element(void *element, size_t size)
{
        u8 *obj = element;

        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}
static void poison_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                __poison_element(element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->alloc == mempool_alloc_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_poison_kfree(element, _RET_IP_);
        if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_unpoison_slab(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
static __always_inline void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
        kasan_poison_element(pool, element);
        pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
        void *element = pool->elements[--pool->curr_nr];

        BUG_ON(pool->curr_nr < 0);
        kasan_unpoison_element(pool, element);
        check_element(pool, element);
        return element;
}
/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool. This function only sleeps if the
 * free_fn() function sleeps. The mempool_t itself is not freed; it is
 * owned by the caller (typically embedded in a larger structure).
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
        while (pool->curr_nr) {
                void *element = remove_element(pool);

                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
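
/*
 * Illustrative sketch of a typical caller (names such as "struct my_dev"
 * are hypothetical, not from this file): a pool embedded in a larger
 * object is torn down with mempool_exit(), since the containing object,
 * not the mempool code, owns the mempool_t storage:
 *
 *      static void my_dev_release(struct my_dev *dev)
 *      {
 *              mempool_exit(&dev->pool);
 *              kfree(dev);
 *      }
 */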
/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
        if (unlikely(!pool))
                return;

        mempool_exit(pool);
        kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
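
/*
 * Note that mempool_destroy() tolerates a NULL @pool, so error paths may
 * call it unconditionally. Hypothetical sketch:
 *
 *      mempool_destroy(my_pool);
 *      my_pool = NULL;
 */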
int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                      mempool_free_t *free_fn, void *pool_data,
                      gfp_t gfp_mask, int node_id)
{
        spin_lock_init(&pool->lock);
        pool->min_nr    = min_nr;
        pool->pool_data = pool_data;
        pool->alloc     = alloc_fn;
        pool->free      = free_fn;
        init_waitqueue_head(&pool->wait);

        pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
                                            gfp_mask, node_id);
        if (!pool->elements)
                return -ENOMEM;

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

                element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_exit(pool);
                        return -ENOMEM;
                }
                add_element(pool, element);
        }

        return 0;
}
EXPORT_SYMBOL(mempool_init_node);
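
/*
 * Hypothetical sketch: NUMA-aware callers can place both the element
 * array and the preallocated reserve on a chosen node ("dev", "pdev",
 * "my_cache" and MY_MIN_NR are illustrative names only):
 *
 *      err = mempool_init_node(&dev->pool, MY_MIN_NR, mempool_alloc_slab,
 *                              mempool_free_slab, my_cache,
 *                              GFP_KERNEL, dev_to_node(&pdev->dev));
 */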
/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in-place (i.e. embedded
 * in another structure) instead of allocating it.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                 mempool_free_t *free_fn, void *pool_data)
{
        return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                                 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
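
/*
 * Minimal sketch of an embedded pool (hypothetical names; assumes a
 * kmem_cache "my_cache" already exists). Two reserved elements are
 * enough to guarantee forward progress for this hypothetical user:
 *
 *      struct my_dev {
 *              mempool_t pool;
 *      };
 *
 *      static int my_dev_setup(struct my_dev *dev, struct kmem_cache *my_cache)
 *      {
 *              return mempool_init(&dev->pool, 2, mempool_alloc_slab,
 *                                  mempool_free_slab, my_cache);
 *      }
 */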
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                          mempool_free_t *free_fn, void *pool_data)
{
        return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
                                   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
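
/*
 * Minimal sketch of the common slab-backed case (MY_MIN_NR and my_cache
 * are hypothetical); mempool_create_slab_pool() in <linux/mempool.h>
 * wraps exactly this alloc_fn/free_fn pairing:
 *
 *      pool = mempool_create(MY_MIN_NR, mempool_alloc_slab,
 *                            mempool_free_slab, my_cache);
 *      if (!pool)
 *              return -ENOMEM;
 */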
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                               mempool_free_t *free_fn, void *pool_data,
                               gfp_t gfp_mask, int node_id)
{
        mempool_t *pool;

        pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
        if (!pool)
                return NULL;

        if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
                              gfp_mask, node_id)) {
                kfree(pool);
                return NULL;
        }

        return pool;
}
EXPORT_SYMBOL(mempool_create_node);
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);
        might_sleep();

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                                     GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
               pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);   /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);
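
/*
 * Hypothetical sketch: doubling the reserve when a caller learns that
 * more in-flight requests must be guaranteed. As documented above,
 * growing may return before the pool is actually refilled to the new
 * minimum:
 *
 *      err = mempool_resize(my_pool, 2 * MY_MIN_NR);
 */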
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_entry_t wait;
        gfp_t gfp_temp;

        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(element);
                return element;
        }

        /*
         * We use gfp mask w/o direct reclaim or IO for the first round. If
         * alloc failed with that and @pool was empty, retry immediately.
         */
        if (gfp_temp != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }

        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
        }

        /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

        spin_unlock_irqrestore(&pool->lock, flags);

        /*
         * FIXME: this should be io_schedule(). The timeout is there as a
         * workaround for some DM problems in 2.6.18.
         */
        io_schedule_timeout(5*HZ);

        finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
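
/*
 * Sketch of a typical I/O-path allocation (hypothetical names): passing
 * GFP_NOIO prevents direct reclaim from recursing into the same I/O
 * stack, and the call then sleeps, if needed, until another task frees
 * an element back into the pool rather than failing:
 *
 *      req = mempool_alloc(my_pool, GFP_NOIO);
 */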
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        if (unlikely(element == NULL))
                return;

        /*
         * Paired with the wmb in mempool_alloc(). The preceding read is
         * for @element and the following @pool->curr_nr. This ensures
         * that the visible value of @pool->curr_nr is from after the
         * allocation of @element. This is necessary for fringe cases
         * where @element was passed to this task without going through
         * barriers.
         *
         * For example, assume @p is %NULL at the beginning and one task
         * performs "p = mempool_alloc(...);" while another task is doing
         * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
         * may end up using curr_nr value which is from before allocation
         * of @p without the following rmb.
         */
        smp_rmb();

        /*
         * For correctness, we need a test which is guaranteed to trigger
         * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
         * without locking achieves that and refilling as soon as possible
         * is desirable.
         *
         * Because curr_nr visible here is always a value after the
         * allocation of @element, any task which decremented curr_nr below
         * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
         * incremented to min_nr afterwards. If curr_nr gets incremented
         * to min_nr after the allocation of @element, the elements
         * allocated after that are subject to the same guarantee.
         *
         * Waiters happen iff curr_nr is 0 and the above guarantee also
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
        if (unlikely(pool->curr_nr < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }
        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
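
/*
 * Sketch of the matching release (hypothetical names): every element
 * obtained with mempool_alloc() must be returned through mempool_free()
 * of the same pool, so that a depleted reserve is topped up before the
 * element is handed back to free_fn():
 *
 *      mempool_free(req, my_pool);
 */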
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *mem = pool_data;
        VM_BUG_ON(mem->ctor);
        return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);
void mempool_free_slab(void *element, void *pool_data)
{
        struct kmem_cache *mem = pool_data;
        kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;
        return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);
void mempool_kfree(void *element, void *pool_data)
{
        kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
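
/*
 * Hypothetical sketch: a kmalloc-backed pool passes the element size,
 * cast to a pointer, as @pool_data; mempool_create_kmalloc_pool() in
 * <linux/mempool.h> wraps this pairing:
 *
 *      pool = mempool_create(MY_MIN_NR, mempool_kmalloc, mempool_kfree,
 *                            (void *)(size_t)MY_ELEM_SIZE);
 */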
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
        int order = (int)(long)pool_data;
        return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);
void mempool_free_pages(void *element, void *pool_data)
{
        int order = (int)(long)pool_data;
        __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
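
/*
 * Hypothetical sketch: a page-backed pool passes the allocation order,
 * cast to a pointer, as @pool_data; mempool_create_page_pool() in
 * <linux/mempool.h> wraps this pairing:
 *
 *      pool = mempool_create(MY_MIN_NR, mempool_alloc_pages,
 *                            mempool_free_pages, (void *)(long)MY_ORDER);
 */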