#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate declarations in the kmem_cache structures of SLAB and
 * SLUB are no longer needed (see the sketch below the struct).
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	unsigned long flags;		/* Active flags on the slab */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */
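
/*
 * Illustrative sketch only, not part of the build: what a C11 anonymous
 * struct member looks like. Strict C11 only allows *untagged* anonymous
 * members, so sharing one named definition between SLAB and SLUB would still
 * need a compiler extension (e.g. -fms-extensions) or a macro; the fields
 * below simply mirror the common fields above.
 *
 *	struct kmem_cache {			// hypothetical allocator variant
 *		struct {			// C11 anonymous struct member
 *			unsigned int object_size;
 *			unsigned int size;
 *			unsigned long flags;
 *			const char *name;
 *		};
 *		// allocator-specific fields would follow here
 *	};
 */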

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
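
/*
 * Hedged example of how the bootstrap state is consumed. slab_is_available()
 * is the conventional wrapper (defined in mm/slab_common.c); it is shown here
 * only as a sketch of the slab_state protocol, not as a definition.
 *
 *	bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;	// kmalloc() is usable from here on
 *	}
 *
 * Code that can run earlier than UP must fall back to the memblock/bootmem
 * allocators instead of kmalloc().
 */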

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
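
/*
 * Hedged sketch of how kmalloc_slab() is meant to be used: the kmalloc() fast
 * path maps a request size to one of the pre-built kmalloc caches and then
 * allocates from that cache. Simplified; the real per-allocator paths also
 * handle large (page-order) requests and tracing. example_kmalloc() is a
 * hypothetical name.
 *
 *	void *example_kmalloc(size_t size, gfp_t flags)
 *	{
 *		struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *		if (unlikely(ZERO_OR_NULL_PTR(s)))
 *			return s;		// zero-sized or out-of-range request
 *		return kmem_cache_alloc(s, flags);
 *	}
 */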

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
	unsigned long flags, const char *name, void (*ctor)(void *));
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
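
/*
 * Hedged sketch of how the merging hooks above fit together: before building
 * a new cache, kmem_cache_create() asks the allocator for an existing
 * compatible cache via __kmem_cache_alias(), which typically relies on
 * find_mergeable() and slab_unmergeable(). Simplified; locking and error
 * handling are omitted.
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;	// reuse the mergeable cache
 *	// otherwise allocate a fresh kmem_cache and call __kmem_cache_create()
 */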

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
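
/*
 * Hedged usage sketch: cache creation is expected to strip any caller flag
 * that is not valid for the current configuration, e.g.:
 *
 *	flags &= CACHE_CREATE_MASK;
 */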

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int batchcount;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
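
/*
 * Hedged sketch of a /proc/slabinfo style consumer: the allocator fills in a
 * struct slabinfo via get_slabinfo() and the caller prints the counters.
 * example_show() and the seq_printf() format are illustrative only.
 *
 *	static void example_show(struct seq_file *m, struct kmem_cache *s)
 *	{
 *		struct slabinfo sinfo;
 *
 *		memset(&sinfo, 0, sizeof(sinfo));
 *		get_slabinfo(s, &sinfo);
 *		seq_printf(m, "%-17s %6lu %6lu\n",
 *			   s->name, sinfo.active_objs, sinfo.num_objs);
 *	}
 */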

/*
 * Generic implementations of the bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
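
/*
 * Hedged sketch of the generic bulk-allocation fallback: when an allocator
 * has no optimized bulk path, objects can simply be allocated one by one and
 * the work undone on failure. This mirrors the intent of
 * __kmem_cache_alloc_bulk(); it is not the exact implementation.
 *
 *	size_t i;
 *
 *	for (i = 0; i < nr; i++) {
 *		void *x = kmem_cache_alloc(s, flags);
 *
 *		if (!x) {
 *			__kmem_cache_free_bulk(s, i, p);	// undo partial work
 *			return false;
 *		}
 *		p[i] = x;
 *	}
 *	return true;
 */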

#ifdef CONFIG_MEMCG_KMEM
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

#define for_each_memcg_cache_safe(iter, tmp, root) \
	list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
				 memcg_params.list)
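
/*
 * Hedged usage sketch for the iterators above: walk every per-memcg child of
 * a root cache while holding slab_mutex (the list is otherwise unstable).
 * root_cache stands for any root kmem_cache.
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */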

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
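
/*
 * Hedged sketch of how the charge/uncharge pair is meant to bracket slab page
 * allocation in the individual allocators: charge the owning memcg before
 * taking pages for a non-root cache, and uncharge when the slab page is
 * released. Simplified; the real allocators fold this into their page
 * allocation helpers.
 *
 *	if (memcg_charge_slab(s, gfp, order))
 *		return NULL;			// over the memcg kmem limit
 *	page = alloc_pages(gfp, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);	// give the charge back
 */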

#else /* !CONFIG_MEMCG_KMEM */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )
#define for_each_memcg_cache_safe(iter, tmp, root) \
	for ((void)(iter), (void)(tmp), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	return s;
}
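
/*
 * Hedged usage sketch: the free paths resolve the cache that really owns an
 * object before touching per-cache state, so per-memcg child caches (and
 * debugging mismatches) are handled correctly. example_kmem_cache_free() is a
 * hypothetical wrapper.
 *
 *	void example_kmem_cache_free(struct kmem_cache *s, void *objp)
 *	{
 *		s = cache_from_obj(s, objp);	// may redirect to the owning cache
 *		// ... continue with the allocator-specific free of objp from s
 *	}
 */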

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))
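
/*
 * Hedged usage sketch for the node iterator: sum a per-node counter across
 * all nodes that have a kmem_cache_node allocated. The field used here
 * (nr_partial, SLUB-only) is just an example.
 *
 *	int node;
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial_slabs = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial_slabs += n->nr_partial;
 */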

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */