Linux 4.6-rc6
[cris-mirror.git] include/linux/slub_def.h
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
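/*
 * Illustrative sketch (editorial, not part of this header): mm/slub.c
 * bumps these counters with a plain per-cpu increment, roughly as
 * below.  The body is reproduced from memory of the 4.6 sources and
 * should be treated as an approximation.
 */
#if 0	/* example only */
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The read-modify-write is unprotected: for a statistics
	 * counter, an occasional increment lost to preemption is
	 * cheaper than disabling interrupts around every update.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
#endif	/* example only */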
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
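/*
 * Note (editorial): freelist and tid are consumed as a pair.  The
 * lockless fastpaths in mm/slub.c update both at once with
 * this_cpu_cmpxchg_double(), so an operation that raced with an
 * interrupt or migrated to another cpu sees a stale tid, fails the
 * transaction and retries (CMPXCHG_DOUBLE_CPU_FAIL above counts such
 * failures).
 */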
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
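/*
 * Illustrative sketch (editorial, not part of this header): mm/slub.c
 * keeps the page order in the high bits of x and the object count in
 * the low bits, so a single word-sized read or update covers both.
 * The shift/mask values and helpers below mirror the 4.6 mm/slub.c
 * definitions from memory; note the real oo_make() derives the object
 * count from the object size and reserved bytes rather than taking it
 * as a parameter.
 */
#if 0	/* example only */
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

static inline struct kmem_cache_order_objects oo_make(int order,
						unsigned long nr_objects)
{
	struct kmem_cache_order_objects x = {
		((unsigned long)order << OO_SHIFT) + nr_objects
	};
	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;		/* high bits: page order */
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;		/* low bits: objects per slab */
}
#endif	/* example only */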
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
	int red_left_pad;	/* Left redzone padding size */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_remove(struct kmem_cache *);
#else
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
}
#endif
/**
 * virt_to_obj - returns address of the beginning of object.
 * @s: object's kmem_cache
 * @slab_page: address of slab page
 * @x: address within object memory range
 *
 * Returns address of the beginning of object.
 */
static inline void *virt_to_obj(struct kmem_cache *s,
				const void *slab_page,
				const void *x)
{
	return (void *)x - ((x - slab_page) % s->size);
}
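/*
 * Example (editorial): with s->size == 64 and x pointing 150 bytes into
 * the slab page, (150 % 64) == 22, so virt_to_obj() returns the address
 * 150 - 22 == 128 bytes into the page, i.e. the start of the third
 * object.
 */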
void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
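/*
 * Note (editorial): unlike virt_to_obj(), nearest_obj() clamps to the
 * last valid object.  Assuming 64-byte objects and page->objects == 63,
 * the last object starts at byte 3968 of the page; a pointer at byte
 * 4090 rounds down to 4032, which is past the last object, so it is
 * clamped to 3968.  Error reporting that must decode an arbitrary
 * address inside a slab page (KASAN, for instance) relies on this.
 */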
#endif /* _LINUX_SLUB_DEF_H */