mm/mempool.c (Linux 4.19-rc7)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
                         size_t byte)
{
        const int nr = pool->curr_nr;
        const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
        const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
        int i;

        pr_err("BUG: mempool element poison mismatch\n");
        pr_err("Mempool %p size %zu\n", pool, size);
        pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
        for (i = start; i < end; i++)
                pr_cont("%x ", *(u8 *)(element + i));
        pr_cont("%s\n", end < size ? "..." : "");
        dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
        u8 *obj = element;
        size_t i;

        for (i = 0; i < size; i++) {
                u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

                if (obj[i] != exp) {
                        poison_error(pool, element, size, i);
                        return;
                }
        }
        memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
                __check_element(pool, element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->free == mempool_free_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}
static void __poison_element(void *element, size_t size)
{
        u8 *obj = element;

        memset(obj, POISON_FREE, size - 1);
        obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
        /* Mempools backed by slab allocator */
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                __poison_element(element, ksize(element));

        /* Mempools backed by page allocator */
        if (pool->alloc == mempool_alloc_pages) {
                int order = (int)(long)pool->pool_data;
                void *addr = kmap_atomic((struct page *)element);

                __poison_element(addr, 1UL << (PAGE_SHIFT + order));
                kunmap_atomic(addr);
        }
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_poison_kfree(element, _RET_IP_);
        if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                kasan_unpoison_slab(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
        BUG_ON(pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
        kasan_poison_element(pool, element);
        pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
        void *element = pool->elements[--pool->curr_nr];

        BUG_ON(pool->curr_nr < 0);
        kasan_unpoison_element(pool, element);
        check_element(pool, element);
        return element;
}
/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool: pointer to the memory pool which was initialized with
 *        mempool_init().
 *
 * Free all reserved elements in @pool and the element array. This function
 * only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
        while (pool->curr_nr) {
                void *element = remove_element(pool);

                pool->free(element, pool->pool_data);
        }
        kfree(pool->elements);
        pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself. This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
        if (unlikely(!pool))
                return;

        mempool_exit(pool);
        kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                      mempool_free_t *free_fn, void *pool_data,
                      gfp_t gfp_mask, int node_id)
{
        spin_lock_init(&pool->lock);
        pool->min_nr    = min_nr;
        pool->pool_data = pool_data;
        pool->alloc     = alloc_fn;
        pool->free      = free_fn;
        init_waitqueue_head(&pool->wait);

        pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
                                            gfp_mask, node_id);
        if (!pool->elements)
                return -ENOMEM;

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

                element = pool->alloc(gfp_mask, pool->pool_data);
                if (unlikely(!element)) {
                        mempool_exit(pool);
                        return -ENOMEM;
                }
                add_element(pool, element);
        }

        return 0;
}
EXPORT_SYMBOL(mempool_init_node);
/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
                 mempool_free_t *free_fn, void *pool_data)
{
        return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                                 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
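
/*
 * Illustrative sketch (editor's addition, not part of mempool.c): a pool
 * embedded in a driver-private structure, pairing mempool_init() with
 * mempool_exit().  "struct example_dev" and the 16/256 sizing are
 * hypothetical.
 */
struct example_dev {
        mempool_t request_pool;
};

static int example_dev_setup(struct example_dev *dev)
{
        /* Reserve 16 elements of 256 bytes, backed by kmalloc()/kfree(). */
        return mempool_init(&dev->request_pool, 16, mempool_kmalloc,
                            mempool_kfree, (void *)(unsigned long)256);
}

static void example_dev_teardown(struct example_dev *dev)
{
        /* Frees the reserved elements and the elements array. */
        mempool_exit(&dev->request_pool);
}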
/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                          mempool_free_t *free_fn, void *pool_data)
{
        return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
                                   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
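
/*
 * Illustrative sketch (editor's addition): a pool built on a dedicated
 * kmem_cache, the usual pairing for mempool_alloc_slab()/mempool_free_slab().
 * The cache name, object size and reserve depth are made up;
 * <linux/mempool.h> also offers a mempool_create_slab_pool() wrapper for
 * this pattern.
 */
static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int example_pool_setup(void)
{
        example_cache = kmem_cache_create("example_objs", 128, 0,
                                          SLAB_HWCACHE_ALIGN, NULL);
        if (!example_cache)
                return -ENOMEM;

        /* Guarantee that at least 4 objects can always be allocated. */
        example_pool = mempool_create(4, mempool_alloc_slab,
                                      mempool_free_slab, example_cache);
        if (!example_pool) {
                kmem_cache_destroy(example_cache);
                return -ENOMEM;
        }
        return 0;
}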
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                               mempool_free_t *free_fn, void *pool_data,
                               gfp_t gfp_mask, int node_id)
{
        mempool_t *pool;

        pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
        if (!pool)
                return NULL;

        if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
                              gfp_mask, node_id)) {
                kfree(pool);
                return NULL;
        }

        return pool;
}
EXPORT_SYMBOL(mempool_create_node);
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
        void *element;
        void **new_elements;
        unsigned long flags;

        BUG_ON(new_min_nr <= 0);
        might_sleep();

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                                     GFP_KERNEL);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
               pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
                        pool->free(element, pool->pool_data);   /* Raced */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
EXPORT_SYMBOL(mempool_resize);
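
/*
 * Illustrative sketch (editor's addition): growing an existing pool's
 * reserve, e.g. when a device learns it must support a deeper queue.
 * "example_pool_grow" is hypothetical; growth may complete lazily via
 * later mempool_free() calls, as documented above.
 */
static int example_pool_grow(mempool_t *pool, int new_depth)
{
        int ret;

        ret = mempool_resize(pool, new_depth);  /* may sleep */
        if (ret)
                pr_warn("example: could not grow reserve to %d elements\n",
                        new_depth);
        return ret;
}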
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (It might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_entry_t wait;
        gfp_t gfp_temp;

        VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

        element = pool->alloc(gfp_temp, pool->pool_data);
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                /* paired with rmb in mempool_free(), read comment there */
                smp_wmb();
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(element);
                return element;
        }

        /*
         * We use gfp mask w/o direct reclaim or IO for the first round. If
         * alloc failed with that and @pool was empty, retry immediately.
         */
        if (gfp_temp != gfp_mask) {
                spin_unlock_irqrestore(&pool->lock, flags);
                gfp_temp = gfp_mask;
                goto repeat_alloc;
        }

        /* We must not sleep if !__GFP_DIRECT_RECLAIM */
        if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                spin_unlock_irqrestore(&pool->lock, flags);
                return NULL;
        }

        /* Let's wait for someone else to return an element to @pool */
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

        spin_unlock_irqrestore(&pool->lock, flags);

        /*
         * FIXME: this should be io_schedule(). The timeout is there as a
         * workaround for some DM problems in 2.6.18.
         */
        io_schedule_timeout(5*HZ);

        finish_wait(&pool->wait, &wait);
        goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
        unsigned long flags;

        if (unlikely(element == NULL))
                return;

        /*
         * Paired with the wmb in mempool_alloc(). The preceding read is
         * for @element and the following @pool->curr_nr. This ensures
         * that the visible value of @pool->curr_nr is from after the
         * allocation of @element. This is necessary for fringe cases
         * where @element was passed to this task without going through
         * barriers.
         *
         * For example, assume @p is %NULL at the beginning and one task
         * performs "p = mempool_alloc(...);" while another task is doing
         * "while (!p) cpu_relax(); mempool_free(p, ...);". This function
         * may end up using curr_nr value which is from before allocation
         * of @p without the following rmb.
         */
        smp_rmb();

        /*
         * For correctness, we need a test which is guaranteed to trigger
         * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
         * without locking achieves that and refilling as soon as possible
         * is desirable.
         *
         * Because curr_nr visible here is always a value after the
         * allocation of @element, any task which decremented curr_nr below
         * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
         * incremented to min_nr afterwards. If curr_nr gets incremented
         * to min_nr after the allocation of @element, the elements
         * allocated after that are subject to the same guarantee.
         *
         * Waiters happen iff curr_nr is 0 and the above guarantee also
         * ensures that there will be frees which return elements to the
         * pool waking up the waiters.
         */
        if (unlikely(pool->curr_nr < pool->min_nr)) {
                spin_lock_irqsave(&pool->lock, flags);
                if (likely(pool->curr_nr < pool->min_nr)) {
                        add_element(pool, element);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_up(&pool->wait);
                        return;
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }
        pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
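
/*
 * Illustrative sketch (editor's addition): the typical allocate/use/return
 * cycle on an I/O path.  GFP_NOIO allows sleeping but avoids recursing into
 * I/O, so mempool_alloc() cannot return NULL here.  "example_pool" and the
 * submission step are hypothetical.
 */
static void example_submit(mempool_t *example_pool)
{
        void *req;

        req = mempool_alloc(example_pool, GFP_NOIO);    /* never fails */

        /* ... fill in @req and submit it to the hardware ... */

        /* On completion, return the element so any waiters can make progress. */
        mempool_free(req, example_pool);
}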
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        struct kmem_cache *mem = pool_data;
        VM_BUG_ON(mem->ctor);
        return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
        struct kmem_cache *mem = pool_data;
        kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t)pool_data;
        return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
        kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
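
/*
 * Illustrative sketch (editor's addition): a kmalloc-backed pool.  The
 * element size travels through @pool_data as a cast integer, matching
 * mempool_kmalloc()/mempool_kfree() above; <linux/mempool.h> also provides
 * a mempool_create_kmalloc_pool() wrapper.  The 8/512 sizing is made up.
 */
static mempool_t *example_kmalloc_pool_create(void)
{
        /* Reserve 8 buffers of 512 bytes each. */
        return mempool_create(8, mempool_kmalloc, mempool_kfree,
                              (void *)(unsigned long)512);
}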
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
        int order = (int)(long)pool_data;
        return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
        int order = (int)(long)pool_data;
        __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
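
/*
 * Illustrative sketch (editor's addition): a page-backed pool.  Here
 * @pool_data carries the allocation order, so each element is a
 * struct page * covering 2^order pages; <linux/mempool.h> also provides a
 * mempool_create_page_pool() wrapper.  The reserve of 2 order-0 pages is
 * made up.
 */
static mempool_t *example_page_pool_create(void)
{
        /* Reserve 2 single (order-0) pages. */
        return mempool_create(2, mempool_alloc_pages, mempool_free_pages,
                              (void *)(long)0);
}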