// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}
static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}
static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
		__check_element(pool, element, ksize(element));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}
static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
		__poison_element(element, ksize(element));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_slab_free_mempool(element, _RET_IP_);
	else if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_range(element, __ksize(element));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);

	return element;
}
/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool. Unlike mempool_destroy(), this does
 * not free @pool itself, which is owned by the caller (typically embedded
 * in a larger structure). This function only sleeps if the free_fn()
 * function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr	= min_nr;
	pool->pool_data = pool_data;
	pool->alloc	= alloc_fn;
	pool->free	= free_fn;
	init_waitqueue_head(&pool->wait);

	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);
/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		 mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round. If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule(). The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc(). The preceding read is
	 * for @element and the following @pool->curr_nr. This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element. This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards. If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);
void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);
void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);
void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);