/*
 * memory buffer pool support. Such pools are mostly used
 * for guaranteed, deadlock-free memory allocations during
 * extreme VM load.
 *
 * started by Ingo Molnar, Copyright (C) 2001
 * debugging by David Rientjes, Copyright (C) 2015
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
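
/*
 * Illustration of the poisoning scheme above (sketch only, using the usual
 * poison values from <linux/poison.h>: POISON_FREE is 0x6b, POISON_END is
 * 0xa5, POISON_INUSE is 0x5a).  While an element sits idle in the pool,
 * __poison_element() fills bytes [0, size - 2] with POISON_FREE and the last
 * byte with POISON_END; check_element() verifies exactly that pattern on
 * removal and then fills the element with POISON_INUSE.  For size == 8:
 *
 *	idle in pool:    6b 6b 6b 6b 6b 6b 6b a5
 *	after removal:   5a 5a 5a 5a 5a 5a 5a 5a
 */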

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_free(pool->pool_data, element);
	if (pool->alloc == mempool_kmalloc)
		kasan_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab)
		kasan_slab_alloc(pool->pool_data, element);
	if (pool->alloc == mempool_kmalloc)
		kasan_krealloc(element, (size_t)pool->pool_data);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	check_element(pool, element);
	kasan_unpoison_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);