#ifndef _LINUX_SLQB_DEF_H
#define _LINUX_SLQB_DEF_H

/*
 * SLQB : A slab allocator with object queues.
 *
 * (C) 2008 Nick Piggin <npiggin@suse.de>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/rcu_types.h>
#include <linux/mm_types.h>
#include <linux/kernel.h>
#define SLAB_NUMA		0x00000001UL	/* shortcut */

enum stat_item {
	ALLOC,				/* Allocation count */
	ALLOC_SLAB_FILL,		/* Fill freelist from page list */
	ALLOC_SLAB_NEW,			/* New slab acquired from page allocator */
	FREE,				/* Free count */
	FREE_REMOTE,			/* NUMA: freeing to remote list */
	FLUSH_FREE_LIST,		/* Freelist flushed */
	FLUSH_FREE_LIST_OBJECTS,	/* Objects flushed from freelist */
	FLUSH_FREE_LIST_REMOTE,		/* Objects flushed from freelist to remote */
	FLUSH_SLAB_PARTIAL,		/* Freeing moves slab to partial list */
	FLUSH_SLAB_FREE,		/* Slab freed to the page allocator */
	FLUSH_RFREE_LIST,		/* Rfree list flushed */
	FLUSH_RFREE_LIST_OBJECTS,	/* Rfree objects flushed */
	CLAIM_REMOTE_LIST,		/* Remote freed list claimed */
	CLAIM_REMOTE_LIST_OBJECTS,	/* Remote freed objects claimed */
	NR_SLQB_STAT_ITEMS
};
/*
 * Singly-linked list with head, tail, and nr
 */
struct kmlist {
	unsigned long	nr;
	void		**head;
	void		**tail;
};

/*
 * Every kmem_cache_list has a kmem_cache_remote_free structure, by which
 * objects can be returned to the kmem_cache_list from remote CPUs.
 */
struct kmem_cache_remote_free {
	spinlock_t	lock;
	struct kmlist	list;
} ____cacheline_aligned;
/*
 * A kmem_cache_list manages all the slabs and objects allocated from a given
 * source. Per-cpu kmem_cache_lists allow node-local allocations. Per-node
 * kmem_cache_lists allow off-node allocations (but require locking).
 */
struct kmem_cache_list {
				/* Fastpath LIFO freelist of objects */
	struct kmlist		freelist;

				/* remote_free has reached a watermark */
	int			remote_free_check;

				/* kmem_cache corresponding to this list */
	struct kmem_cache	*cache;

				/* Number of partial slabs (pages) */
	unsigned long		nr_partial;

				/* Slabs which have some free objects */
	struct list_head	partial;

				/* Total number of slabs allocated */
	unsigned long		nr_slabs;

				/* Protects nr_partial, nr_slabs, and partial */
	spinlock_t		page_lock;

	/*
	 * In the case of per-cpu lists, remote_free is for objects freed by
	 * non-owner CPU back to its home list. For per-node lists, remote_free
	 * is always used to free objects.
	 */
	struct kmem_cache_remote_free remote_free;

#ifdef CONFIG_SLQB_STATS
	unsigned long		stats[NR_SLQB_STAT_ITEMS];
#endif
} ____cacheline_aligned;
/*
 * Primary per-cpu, per-kmem_cache structure.
 */
struct kmem_cache_cpu {
	struct kmem_cache_list	list;		/* List for node-local slabs */
	unsigned int		colour_next;	/* Next colour offset to use */

	/*
	 * rlist is a list of objects that don't fit on list.freelist (ie.
	 * wrong node). The objects all correspond to a given kmem_cache_list,
	 * remote_cache_list. To free objects to another list, we must first
	 * flush the existing objects, then switch remote_cache_list.
	 *
	 * An NR_CPUS or MAX_NUMNODES array would be nice here, but then we
	 * get to O(NR_CPUS^2) memory consumption situation.
	 */
	struct kmlist		rlist;
	struct kmem_cache_list	*remote_cache_list;
} ____cacheline_aligned_in_smp;
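/*
 * Illustrative sketch (not part of the original header) of the rule
 * described for rlist above; slab_free_remote() and the two helpers it
 * calls are hypothetical names standing in for the real slqb.c internals:
 *
 *	static void slab_free_remote(struct kmem_cache *s,
 *				     struct kmem_cache_cpu *c,
 *				     struct kmem_cache_list *target,
 *				     void *object)
 *	{
 *		if (c->remote_cache_list != target) {
 *			// rlist may only hold objects destined for one
 *			// list at a time, so drain it before retargeting.
 *			flush_remote_free_cache(s, c);
 *			c->remote_cache_list = target;
 *		}
 *		// Queue the object; it is pushed to target->remote_free
 *		// in batches later.
 *		kmlist_queue(&c->rlist, object);
 *	}
 */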
/*
 * Per-node, per-kmem_cache structure. Used for node-specific allocations.
 */
struct kmem_cache_node {
	struct kmem_cache_list	list;
	spinlock_t		list_lock;	/* protects access to list */
} ____cacheline_aligned;
/*
 * Management object for a slab cache.
 */
struct kmem_cache {
	unsigned long	flags;

	int		hiwater;	/* LIFO list high watermark */
	int		freebatch;	/* LIFO freelist batch flush size */

#ifdef CONFIG_SMP
	struct kmem_cache_cpu	**cpu_slab;	/* dynamic per-cpu structures */
#else
	struct kmem_cache_cpu	cpu_slab;
#endif

	int		objsize;	/* Size of object without meta data */
	int		offset;		/* Free pointer offset. */
	int		objects;	/* Number of objects in slab */

#ifdef CONFIG_NUMA
	struct kmem_cache_node	**node_slab;	/* dynamic per-node structures */
#endif

	int		size;		/* Size of object including meta data */
	int		order;		/* Allocation order */
	gfp_t		allocflags;	/* gfp flags to use on allocation */
	unsigned int	colour_range;	/* range of colour counter */
	unsigned int	colour_off;	/* offset per colour */
	void		(*ctor)(void *);

	const char	*name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */

	int		align;		/* Alignment */
	int		inuse;		/* Offset to metadata */

#ifdef CONFIG_SLQB_SYSFS
	struct kobject	kobj;		/* For sysfs */
#endif
} ____cacheline_aligned;
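/*
 * Illustrative note (not from the original header): hiwater and freebatch
 * tune per-CPU freelist trimming. For example, hypothetical values of
 * hiwater == 1024 and freebatch == 256 would let list.freelist grow to
 * roughly 1024 queued objects before a flush, which then drains objects
 * in batches of 256 (compare the FLUSH_FREE_LIST* stat items above).
 */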
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW	ilog2(KMALLOC_MIN_SIZE)
#define KMALLOC_SHIFT_SLQB_HIGH	(PAGE_SHIFT +				\
				((9 <= (MAX_ORDER - 1)) ? 9 : (MAX_ORDER - 1)))
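/*
 * Worked example (not from the original header): with 4K pages
 * (PAGE_SHIFT == 12) and MAX_ORDER == 11, min(9, MAX_ORDER - 1) is 9,
 * so KMALLOC_SHIFT_SLQB_HIGH is 21 and the largest kmalloc cache serves
 * allocations up to 1UL << 21 == 2MB.
 */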
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_SLQB_HIGH + 1];
extern struct kmem_cache kmalloc_caches_dma[KMALLOC_SHIFT_SLQB_HIGH + 1];
/*
 * Constant size allocations use this path to find index into kmalloc caches
 * arrays. get_slab() function is used for non-constant sizes.
 */
static __always_inline int kmalloc_index(size_t size)
{
	extern int ____kmalloc_too_large(void);

	if (unlikely(size <= KMALLOC_MIN_SIZE))
		return KMALLOC_SHIFT_LOW;

	/* The 96 and 192 byte caches reuse the otherwise unused indices 1 and 2. */
#if L1_CACHE_BYTES < 64
	if (size > 64 && size <= 96)
		return 1;
#endif
#if L1_CACHE_BYTES < 128
	if (size > 128 && size <= 192)
		return 2;
#endif
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	return ____kmalloc_too_large();
}
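/*
 * Examples (illustrative, not from the original header), assuming the
 * default KMALLOC_MIN_SIZE of 8: kmalloc_index(8) returns 3
 * (KMALLOC_SHIFT_LOW), kmalloc_index(100) returns 7 (the 128-byte cache),
 * and kmalloc_index(5000) returns 13 (the 8KB cache).
 */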
#ifdef CONFIG_ZONE_DMA
#define SLQB_DMA __GFP_DMA
#else
/* Disable "DMA slabs" */
#define SLQB_DMA (__force gfp_t)0
#endif
/*
 * Find the kmalloc slab cache for a given combination of allocation flags and
 * size. Should really only be used for constant 'size' arguments, due to
 * the overhead of the index calculation.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > 1UL << KMALLOC_SHIFT_SLQB_HIGH))
		return NULL;
	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	index = kmalloc_index(size);
	if (likely(!(flags & SLQB_DMA)))
		return &kmalloc_caches[index];
	else
		return &kmalloc_caches_dma[index];
}
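/*
 * Example (illustrative, not from the original header): for a constant
 * size, kmalloc_slab(64, GFP_KERNEL) resolves at compile time to
 * &kmalloc_caches[6]; with __GFP_DMA set (and CONFIG_ZONE_DMA enabled) it
 * resolves to &kmalloc_caches_dma[6]. A zero size yields ZERO_SIZE_PTR and
 * an over-limit size yields NULL, both caught by the ZERO_OR_NULL_PTR()
 * checks in the inline callers below.
 */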
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

#define KMALLOC_HEADER (ARCH_KMALLOC_MINALIGN < sizeof(void *) ? \
			sizeof(void *) : ARCH_KMALLOC_MINALIGN)
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		struct kmem_cache *s;

		s = kmalloc_slab(size, flags);
		if (unlikely(ZERO_OR_NULL_PTR(s)))
			return s;

		return kmem_cache_alloc(s, flags);
	}
	return __kmalloc(size, flags);
}
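/*
 * Usage sketch (not part of the original header; "struct foo" is a
 * placeholder type), using kfree() from <linux/slab.h>:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kfree(p);
 *
 * Because sizeof(*p) is a compile-time constant, the call is routed
 * through kmalloc_slab() and kmem_cache_alloc() above rather than
 * __kmalloc().
 */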
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		struct kmem_cache *s;

		s = kmalloc_slab(size, flags);
		if (unlikely(ZERO_OR_NULL_PTR(s)))
			return s;

		return kmem_cache_alloc_node(s, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
#endif /* CONFIG_NUMA */
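/*
 * Usage sketch (not part of the original header): allocate near a given
 * device's NUMA node, e.g.
 *
 *	void *buf = kmalloc_node(4096, GFP_KERNEL, dev_to_node(dev));
 *
 * With a constant size this compiles down to kmem_cache_alloc_node() on
 * the matching kmalloc cache; non-constant sizes go through
 * __kmalloc_node().
 */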
#endif /* _LINUX_SLQB_DEF_H */