mm/slab_common.c
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"
/*
 * Global slab bookkeeping shared by the allocator back ends: slab_state
 * tracks how far slab bring-up has progressed during boot, slab_caches
 * links every kmem_cache that has been created, and slab_mutex protects
 * that list as well as cache creation and destruction.
 */
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a pointer to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;

#ifdef CONFIG_DEBUG_VM
	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		printk(KERN_ERR "kmem_cache_create(%s) integrity check"
			" failed\n", name);
		goto out;
	}
#endif

	/* Hold off CPU hotplug and serialize updates to the cache list. */
	get_online_cpus();
	mutex_lock(&slab_mutex);

#ifdef CONFIG_DEBUG_VM
	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			printk(KERN_ERR
			       "Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			printk(KERN_ERR "kmem_cache_create(%s): Cache name"
				" already exists.\n",
				name);
			dump_stack();
			s = NULL;
			goto oops;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
#endif

	s = __kmem_cache_create(name, size, align, flags, ctor);

#ifdef CONFIG_DEBUG_VM
oops:
#endif
	mutex_unlock(&slab_mutex);
	put_online_cpus();

#ifdef CONFIG_DEBUG_VM
out:
#endif
	if (!s && (flags & SLAB_PANIC))
		panic("kmem_cache_create: Failed to create slab '%s'\n", name);

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
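
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller creates its cache once at init time and then allocates and frees
 * objects from it.  "struct foo" and foo_cachep are hypothetical names.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *					       SLAB_HWCACHE_ALIGN, NULL);
 *		if (!foo_cachep)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * Objects then come from kmem_cache_alloc(foo_cachep, GFP_KERNEL), are
 * returned with kmem_cache_free(foo_cachep, obj), and the cache itself is
 * destroyed with kmem_cache_destroy(foo_cachep) on module exit.
 */
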
int slab_is_available(void)
{
	return slab_state >= UP;
}
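
/*
 * Usage sketch (illustrative, not part of the original file): early boot
 * code can use slab_is_available() to decide whether kmalloc() may be
 * called yet.  The helper and static buffer below are hypothetical.
 *
 *	static char early_scratch[64];
 *
 *	static void *get_scratch(void)
 *	{
 *		if (slab_is_available())
 *			return kmalloc(sizeof(early_scratch), GFP_KERNEL);
 *		return early_scratch;
 *	}
 */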