// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>
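/*
 * Userspace stand-in for the kernel's kmem_cache allocator, used by the
 * radix-tree test harness: objects come from malloc() or posix_memalign(),
 * freed objects are kept on a small per-cache list for reuse, and the
 * file-wide nr_allocated counter tracks objects currently outstanding across
 * all caches.
 */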
int nr_allocated;

struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
	bool exec_callback;
	void (*callback)(void *);
	void *private;
};
void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
{
	cachep->callback = callback;
}
void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
{
	cachep->private = private;
}
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}
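/*
 * Per-cache statistics: nr_allocated counts objects currently handed out by
 * the cache (incremented on allocation, decremented on free), while
 * nr_tallocated counts every allocation made since it was last reset with
 * kmem_cache_zero_nr_tallocated().
 */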
unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}
unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}
unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}
void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}
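/*
 * Without __GFP_DIRECT_RECLAIM an allocation only succeeds while
 * cachep->non_kernel is non-zero; once that budget is exhausted the call
 * returns NULL and arms exec_callback so the registered callback runs at the
 * start of the next allocation attempt. This lets tests inject allocation
 * failures deterministically.
 */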
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		gfp_t gfp)
{
	void *p;

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel) {
			cachep->exec_callback = true;
			return NULL;
		}

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;

		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align) {
			if (posix_memalign(&p, cachep->align, cachep->size) < 0)
				return NULL;
		} else {
			p = malloc(cachep->size);
		}

		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	printf("Allocating %p from slab\n", p);

	return p;
}
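/*
 * Free path: an object is normally threaded back onto the per-cache free list
 * for later reuse, but once more than 10 objects are already cached, or when
 * the cache has an alignment requirement, the memory is poisoned with
 * POISON_FREE and handed back to the system instead.
 */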
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;

		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}
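/* Shrinking is a no-op in this userspace emulation. */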
void kmem_cache_shrink(struct kmem_cache *cachep)
{
}
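/*
 * Bulk allocation: when the free list already holds at least 'size' cached
 * objects they are handed out directly under the lock; otherwise each slot
 * falls back to a fresh allocation. If the loop stops early (non_kernel
 * budget exhausted or out of memory) the partial allocation is rolled back
 * and 0 is returned, otherwise the number of objects allocated is returned.
 */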
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	pr_debug("Bulk alloc %lu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				if (posix_memalign(&p[i], cachep->align,
						   cachep->size) < 0)
					break;
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}

			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		/* Partial success: return everything and report failure. */
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}
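/*
 * The cache descriptor is a plain malloc()ed structure. Note that non_kernel
 * starts at zero, so allocations without __GFP_DIRECT_RECLAIM will fail until
 * a test grants a budget via kmem_cache_set_non_kernel().
 */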
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	ret->exec_callback = false;
	ret->callback = NULL;
	ret->private = NULL;
	return ret;
}
/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators without an aligned kmem_cache to force the
	 * bulk alloc/free paths to reuse freed objects.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
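/*
 * A minimal usage sketch, not part of the original file: it shows how a test
 * can combine the failure-injection hooks above. kmem_cache_set_non_kernel()
 * grants a budget of allocations that may succeed without
 * __GFP_DIRECT_RECLAIM, and kmem_cache_set_callback() registers a function
 * that runs once that budget has been exhausted. The cache name and both
 * functions below are hypothetical examples.
 */
static void example_exhausted_cb(void *private)
{
	printf("allocation budget exhausted for %s\n", (const char *)private);
}

static void example_nonblocking_alloc(void)
{
	struct kmem_cache *cache;
	void *obj;

	cache = kmem_cache_create("example", 256, 0, SLAB_PANIC, NULL);
	kmem_cache_set_private(cache, "example");
	kmem_cache_set_callback(cache, example_exhausted_cb);
	kmem_cache_set_non_kernel(cache, 1);

	/* The first non-reclaim allocation consumes the budget and succeeds. */
	obj = kmem_cache_alloc(cache, __GFP_ZERO);
	assert(obj);

	/* The second fails and arms the callback for the next allocation. */
	assert(!kmem_cache_alloc(cache, __GFP_ZERO));

	kmem_cache_free(cache, obj);
}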