module/os/linux/spl/spl-kmem-cache.c

/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of this file the kmem_cache_* definitions are
 * removed to allow access to the real Linux slab allocator.
 */
#undef	kmem_cache_destroy
#undef	kmem_cache_create
#undef	kmem_cache_alloc
#undef	kmem_cache_free

/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant.  This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define	smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define	smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif

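/*
 * Descriptive note (added commentary, hedged): within this allocator the
 * barrier pair orders a flag update against a subsequent waiter wake-up,
 * e.g. the pattern used by spl_cache_grow_work() below:
 *
 *	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
 *	smp_mb__after_atomic();
 *	wake_up_all(&skc->skc_waitq);
 *
 * The barrier guarantees the cleared bit is visible before any waiter
 * woken by wake_up_all() re-tests it in spl_cache_grow_wait().
 */
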
/* BEGIN CSTYLED */

/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory.  They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock.  This
 * can improve performance on highly contended caches.  However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size.  When this value is set to 0 the magazine size will be
 * automatically determined based on the object size.  Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e. per cpu).  Magazines
 * may never be entirely disabled in this implementation.
 */
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
	"Default magazine size (2-256), set automatically (0)");

/*
 * The default behavior is to report the number of objects remaining in the
 * cache.  This allows the Linux VM to repeatedly reclaim objects from the
 * cache when memory is low to satisfy other memory allocations.  Alternately,
 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
 * is reclaimed.  This may increase the likelihood of out of memory events.
 */
static unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");

static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory.  However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
static unsigned int spl_kmem_cache_slab_limit = 16384;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
	"Objects less than N bytes use the Linux slab");

/*
 * The number of threads available to allocate new slabs for caches.  This
 * should not need to be tuned but it is available for performance analysis.
 */
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
	"Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 */

struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */
static taskq_t *spl_kmem_cache_taskq;	/* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
#ifdef	HAVE_RECLAIM_STATE_RECLAIMED
		current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
	vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}

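/*
 * Worked example (added commentary; the sizes are illustrative
 * assumptions): with skc_obj_size = 24, skc_obj_align = 8, and
 * sizeof (spl_kmem_obj_t) = 32:
 *
 *	P2ROUNDUP_TYPED(24, 8, uint32_t) = 24
 *	P2ROUNDUP_TYPED(32, 8, uint32_t) = 32
 *	spl_obj_size()                   = 56
 *
 * i.e. each slot is the aligned object followed by its aligned
 * spl_kmem_obj_t header, which spl_sko_from_obj() below locates by
 * adding the first rounded term to the object address.
 */
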
uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide it ourselves.  Which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size    <-+ |  |
 * | spl_kmem_obj_t      |  |
 * | skc_obj_size    <---+  |
 * | spl_kmem_obj_t      |  |
 * | ...                 v  |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	void *base;
	uint32_t obj_size;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	for (int i = 0; i < sks->sks_objs; i++) {
		void *obj = base + spl_sks_size(skc) + (i * obj_size);

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	return (sks);
}

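/*
 * Illustrative sketch (added commentary, hedged): the address of the
 * i-th object computed by the loop above is simply:
 *
 *	obj_i = base + spl_sks_size(skc) + (i * spl_obj_size(skc));
 *
 * so, for example, a slab with sks_objs = 32 and spl_obj_size() = 56
 * consumes spl_sks_size() + 32 * 56 bytes of the kv_alloc()'d region,
 * matching the layout diagram above.
 */
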
/*
 * Remove a slab from the complete or partial list; it must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects into the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks = NULL, *m = NULL;
	spl_kmem_obj_t *sko = NULL, *n = NULL;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock.  All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {

		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are run,
	 * and the slabs themselves are freed.  This is all done outside the
	 * skc->skc_lock since this allows the destructor to sleep, and
	 * allows us to perform a conditional reschedule when freeing a
	 * large number of objects and slabs back to the system.
	 */
	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}

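/*
 * Illustrative usage (added commentary, hedged): the tree is keyed on
 * the object address, so callers search and insert under skc->skc_lock
 * exactly as spl_emergency_alloc()/spl_emergency_free() below do:
 *
 *	spin_lock(&skc->skc_lock);
 *	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
 *	spin_unlock(&skc->skc_lock);
 *
 * A return of 0 from spl_emergency_insert() means an entry with the
 * same address already exists and the caller must free its pages.
 */
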
/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance: use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}

/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	int count = MIN(flush, skm->skm_avail);
	for (int i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);
}

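/*
 * Worked example (added commentary): with skm_avail = 5 objects
 * A B C D E and flush = 3, the loop above releases A, B, C to their
 * slabs and the memmove() compacts the survivors to the front:
 *
 *	before: skm_objs[] = { A, B, C, D, E }, skm_avail = 5
 *	after:  skm_objs[] = { D, E, -, -, - }, skm_avail = 2
 */
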
/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	sks_size = spl_sks_size(skc);
	obj_size = spl_obj_size(skc);
	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

	if (tgt_size <= max_size) {
		tgt_objs = (tgt_size - sks_size) / obj_size;
	} else {
		tgt_objs = (max_size - sks_size) / obj_size;
		tgt_size = (tgt_objs * obj_size) + sks_size;
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}

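/*
 * Worked example (added commentary; the tunable values are illustrative
 * assumptions): with spl_kmem_cache_obj_per_slab = 8, spl_obj_size() =
 * 16448 (a 16K object plus 64 bytes of aligned header overhead), and
 * spl_kmem_cache_max_size = 32 (MB):
 *
 *	tgt_size = 8 * 16448 + sks_size  (~128K, well under the 32MB cap)
 *	tgt_objs = (tgt_size - sks_size) / 16448 = 8
 *
 * Only when tgt_size exceeds the max_size cap is the object count
 * recomputed downward from max_size instead.
 */
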
/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128KiB per-magazine */
	else
		size = 256;

	return (size);
}

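/*
 * Worked example (added commentary; assumes PAGE_SIZE = 4KiB): an object
 * of exactly one page is not > PAGE_SIZE, so it lands in the
 * "obj_size > PAGE_SIZE / 4" bucket and gets a 128-entry magazine, i.e.
 * up to 128 * 4KiB = 512KiB cached per CPU.  The per-bucket comments
 * above give the minimum, computed at each bucket's lower bound.
 */
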
/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_cpu = cpu;
	}

	return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_KVMEM	Force kvmem backed SPL cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_NODEBUG	Disable debugging (unsupported)
 */
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT(vmp == NULL);
	ASSERT(reclaim == NULL);

	might_sleep();

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strlcpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
	    GFP_KERNEL);
	if (rc != 0) {
		kfree(skc->skc_name);	/* don't leak the name buffer */
		kfree(skc);
		return (NULL);
	}

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit) {
			/*
			 * Objects smaller than spl_kmem_cache_slab_limit can
			 * use the Linux slab for better space-efficiency.
			 */
			skc->skc_flags |= KMC_SLAB;
		} else {
			/*
			 * All other objects are considered large and are
			 * placed on kvmem backed slabs.
			 */
			skc->skc_flags |= KMC_KVMEM;
		}
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & KMC_KVMEM) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE))
			goto out;

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of SLAB_USERCOPY flag
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL)
			goto out;
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	percpu_counter_destroy(&skc->skc_linux_alloc);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);

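/*
 * Illustrative usage (added commentary, a hedged sketch; my_obj_t,
 * my_ctor, and my_dtor are hypothetical):
 *
 *	spl_kmem_cache_t *cache = spl_kmem_cache_create("my_cache",
 *	    sizeof (my_obj_t), 0, my_ctor, my_dtor, NULL, NULL, NULL,
 *	    KMC_KVMEM);
 *	my_obj_t *obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, obj);
 *	spl_kmem_cache_destroy(cache);
 *
 * Passing 0 for align keeps the SPL_KMEM_CACHE_ALIGN default, and
 * KMC_KVMEM forces an SPL backed cache instead of the object-size
 * based auto-selection above.
 */
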
/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & KMC_KVMEM) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
	percpu_counter_destroy(&skc->skc_linux_alloc);

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}

/*
 * Generic slab allocation function run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}

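/*
 * Descriptive note (added commentary, hedged): spl_fstrans_mark() above
 * flags the task so that page allocations performed while growing the
 * slab do not recurse into filesystem reclaim, which could deadlock
 * against the very cache being grown; the cookie restores the prior
 * task state once the slab has been allocated.
 */
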
static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	int error = __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();
	if (error == 0)
		wake_up_all(&skc->skc_waitq);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	*obj = NULL;

	/*
	 * Since we can't sleep attempt an emergency allocation to satisfy
	 * the request.  The only alternative is to fail the allocation, but
	 * it's preferable to try.  The use of KM_NOSLEEP is expected to
	 * be rare.
	 */
	if (flags & KM_NOSLEEP)
		return (spl_emergency_alloc(skc, flags, obj));

	might_sleep();

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * Note: It would be nice to reduce the overhead of context switch
	 * and improve NUMA locality, by trying to allocate a new slab in the
	 * current process context with KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug that
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
	 */

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to a different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		if (obj != NULL) {
			/*
			 * Even though we leave everything up to the
			 * underlying cache we still keep track of
			 * how many objects we've allocated in it for
			 * better debuggability.
			 */
			percpu_counter_inc(&skc->skc_linux_alloc);
		}
		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);

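/*
 * Illustrative note (added commentary, hedged): KM_SLEEP callers may
 * block but never observe NULL, while KM_NOSLEEP callers must handle
 * failure:
 *
 *	void *obj = spl_kmem_cache_alloc(cache, KM_NOSLEEP);
 *	if (obj == NULL)
 *		return (-ENOMEM);	// hypothetical error path
 *
 * This mirrors the retry loop above, which only re-attempts the
 * allocation when KM_NOSLEEP is not set.
 */
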
/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entire magazines back to the
 * slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		percpu_counter_dec(&skc->skc_linux_alloc);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked.  However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but because no
	 * remote memory allocation tracking is being performed it is
	 * entirely possible to allocate an object from one CPU cache and
	 * return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object;
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released it may simply
 * repopulate the local magazine which will then need to age-out.  Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released.  This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	if (skc->skc_flags & KMC_SLAB)
		return;

	atomic_inc(&skc->skc_ref);

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/* Reclaim from the magazine and free all now empty slabs. */
	unsigned long irq_flags;
	local_irq_save(irq_flags);
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
	spl_cache_flush(skc, skm, skm->skm_avail);
	local_irq_restore(irq_flags);

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

/*
 * This is stubbed out for code consistency with other platforms.  There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
	return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_t *skc = NULL;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		spl_kmem_cache_reap_now(skc);
	}
	up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	if (spl_kmem_cache_taskq == NULL)
		return (-ENOMEM);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	taskq_destroy(spl_kmem_cache_taskq);
}