tools/testing/shared/linux.c
// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <stdio.h>	/* printf() in the verbose tracing paths below */
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>
int nr_allocated;
int preempt_count;
int test_verbose;
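
/*
 * Userspace stand-in for a kernel slab cache.  Freed objects are kept on a
 * simple free list threaded through the radix_tree_node parent pointer, and
 * non_kernel acts as a budget of non-sleeping allocations that may still
 * succeed, which lets tests inject allocation failures.
 */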
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
	bool exec_callback;
	void (*callback)(void *);
	void *private;
};
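
/*
 * Test hooks: the registered callback (with its private data) runs at the
 * start of the allocation that follows a failed non-sleeping allocation;
 * kmem_cache_set_non_kernel() sets how many non-sleeping allocations may
 * still succeed.
 */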
void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
{
	cachep->callback = callback;
}

void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
{
	cachep->private = private;
}

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}
unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}
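
/*
 * Allocation path: reuse an object from the cache's free list when one is
 * available, otherwise fall back to posix_memalign()/malloc().  Non-sleeping
 * allocations (no __GFP_DIRECT_RECLAIM) only succeed while non_kernel is
 * positive, and a failure arms exec_callback for the next allocation.
 */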
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel) {
			cachep->exec_callback = true;
			return NULL;
		}

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;

		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align) {
			/* posix_memalign() returns a positive errno on failure */
			if (posix_memalign(&p, cachep->align, cachep->size) != 0)
				return NULL;
		} else {
			p = malloc(cachep->size);
		}

		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}
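
/*
 * Return an object to the cache: unaligned caches keep up to 11 objects on
 * the free list for reuse; beyond that, and for aligned caches, the object
 * is poisoned and freed immediately.
 */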
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;

		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}
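
/* Nothing to shrink in this userspace shim. */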
void kmem_cache_shrink(struct kmem_cache *cachep)
{
}
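
/*
 * Bulk allocation is all-or-nothing: if any object cannot be allocated
 * (including an injected failure via non_kernel), everything allocated so
 * far is returned to the cache and 0 is reported to the caller.
 */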
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				if (posix_memalign(&p[i], cachep->align,
						   cachep->size) != 0)
					break;
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}

			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}
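
/*
 * Minimal cache constructor: the name and flags arguments are accepted for
 * API compatibility but are otherwise unused here.
 */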
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	ret->exec_callback = false;
	ret->callback = NULL;
	ret->private = NULL;
	return ret;
}
/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators with an unaligned kmem_cache, which forces
	 * the bulk alloc/free paths to reuse objects from the free list.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}