// SPDX-License-Identifier: GPL-2.0

/*
 * Test module for lockless object pool
 *
 * Copyright: wuqiang.matt@bytedance.com
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/objpool.h>

#define OT_NR_MAX_BULK (16)

/* memory usage */
struct ot_mem_stat {
	atomic_long_t alloc;
	atomic_long_t free;
};

/* object allocation results */
struct ot_obj_stat {
	unsigned long nhits;
	unsigned long nmiss;
};

/* control & results per testcase */
struct ot_data {
	struct rw_semaphore start;
	struct completion wait;
	struct completion rcu;
	atomic_t nthreads ____cacheline_aligned_in_smp;
	atomic_t stop ____cacheline_aligned_in_smp;
	struct ot_mem_stat kmalloc;
	struct ot_mem_stat vmalloc;
	struct ot_obj_stat objects;
	u64 duration;
};

/* testcase */
struct ot_test {
	int async; /* synchronous or asynchronous */
	int mode; /* only mode 0 supported */
	int objsz; /* object size */
	int duration; /* ms */
	int delay; /* ms */
	int bulk_normal;
	int bulk_irq;
	unsigned long hrtimer; /* ms */
	const char *name;
	struct ot_data data;
};

/* per-cpu worker */
struct ot_item {
	struct objpool_head *pool; /* pool head */
	struct ot_test *test; /* test parameters */

	void (*worker)(struct ot_item *item, int irq);

	ktime_t hrtcycle;
	struct hrtimer hrtimer;

	int bulk[2]; /* for thread and irq */
	int delay;
	u32 niters;

	/* summary per thread */
	struct ot_obj_stat stat[2]; /* thread and irq */

	u64 duration;
};

/*
 * memory leakage checking
 */

static void *ot_kzalloc(struct ot_test *test, long size)
{
	void *ptr = kzalloc(size, GFP_KERNEL);

	if (ptr)
		atomic_long_add(size, &test->data.kmalloc.alloc);
	return ptr;
}

static void ot_kfree(struct ot_test *test, void *ptr, long size)
{
	if (!ptr)
		return;
	atomic_long_add(size, &test->data.kmalloc.free);
	kfree(ptr);
}

static void ot_mem_report(struct ot_test *test)
{
	long alloc, free;

	pr_info("memory allocation summary for %s\n", test->name);

	alloc = atomic_long_read(&test->data.kmalloc.alloc);
	free = atomic_long_read(&test->data.kmalloc.free);
	pr_info("  kmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);

	alloc = atomic_long_read(&test->data.vmalloc.alloc);
	free = atomic_long_read(&test->data.vmalloc.free);
	pr_info("  vmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);
}

/* user object instance */
struct ot_node {
	void *owner;
	unsigned long payload[32];
};

/* user objpool manager */
struct ot_context {
	struct objpool_head pool; /* objpool head */
	struct ot_test *test; /* test parameters */
	void *ptr; /* user pool buffer */
	unsigned long size; /* buffer size */
	struct rcu_head rcu;
};

static DEFINE_PER_CPU(struct ot_item, ot_pcup_items);

static int ot_init_data(struct ot_data *data)
{
	memset(data, 0, sizeof(*data));
	init_rwsem(&data->start);
	init_completion(&data->wait);
	init_completion(&data->rcu);
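	/*
	 * nthreads starts at 1: the extra count is held by the control thread
	 * in ot_start_sync()/ot_start_async() and dropped there with
	 * atomic_dec_and_test(), so data->wait can only complete after every
	 * worker has finished (or right away if no worker was created).
	 */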
	atomic_set(&data->nthreads, 1);

	return 0;
}

static int ot_init_node(void *nod, void *context)
{
	struct ot_context *sop = context;
	struct ot_node *on = nod;

	on->owner = &sop->pool;
	return 0;
}

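/*
 * The hrtimer emulates an irq-context consumer: every test->hrtimer
 * milliseconds the handler runs item->worker(item, 1), which is accounted
 * separately in item->stat[1] ("irq"), while the kthread loop feeds
 * item->stat[0] ("thread").
 */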
static enum hrtimer_restart ot_hrtimer_handler(struct hrtimer *hrt)
{
	struct ot_item *item = container_of(hrt, struct ot_item, hrtimer);
	struct ot_test *test = item->test;

	if (atomic_read_acquire(&test->data.stop))
		return HRTIMER_NORESTART;

	/* do a bulk round of object pop/push */
	item->worker(item, 1);

	hrtimer_forward(hrt, hrt->base->get_time(), item->hrtcycle);
	return HRTIMER_RESTART;
}

static void ot_start_hrtimer(struct ot_item *item)
{
	if (!item->test->hrtimer)
		return;
	hrtimer_start(&item->hrtimer, item->hrtcycle, HRTIMER_MODE_REL);
}

static void ot_stop_hrtimer(struct ot_item *item)
{
	if (!item->test->hrtimer)
		return;
	hrtimer_cancel(&item->hrtimer);
}

static int ot_init_hrtimer(struct ot_item *item, unsigned long hrtimer)
{
	struct hrtimer *hrt = &item->hrtimer;

	if (!hrtimer)
		return -ENOENT;

	item->hrtcycle = ktime_set(0, hrtimer * 1000000UL);
	hrtimer_init(hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrt->function = ot_hrtimer_handler;
	return 0;
}

static int ot_init_cpu_item(struct ot_item *item,
			struct ot_test *test,
			struct objpool_head *pool,
			void (*worker)(struct ot_item *, int))
{
	memset(item, 0, sizeof(*item));
	item->pool = pool;
	item->test = test;
	item->worker = worker;

	item->bulk[0] = test->bulk_normal;
	item->bulk[1] = test->bulk_irq;
	item->delay = test->delay;

	/* initialize hrtimer */
	ot_init_hrtimer(item, item->test->hrtimer);
	return 0;
}

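/*
 * Worker threads rendezvous on test->data.start: the control thread holds
 * the rwsem for write while creating them, each worker parks in down_read()
 * right after startup, and the later up_write() releases all workers at
 * once so the measured window starts at roughly the same time on every cpu.
 */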
static int ot_thread_worker(void *arg)
{
	struct ot_item *item = arg;
	struct ot_test *test = item->test;
	ktime_t start;

	atomic_inc(&test->data.nthreads);
	down_read(&test->data.start);
	up_read(&test->data.start);
	start = ktime_get();
	ot_start_hrtimer(item);
	do {
		if (atomic_read_acquire(&test->data.stop))
			break;
		/* do a bulk round of object pop/push */
		item->worker(item, 0);
	} while (!kthread_should_stop());
	ot_stop_hrtimer(item);
	item->duration = (u64) ktime_us_delta(ktime_get(), start);
	if (atomic_dec_and_test(&test->data.nthreads))
		complete(&test->data.wait);

	return 0;
}

static void ot_perf_report(struct ot_test *test, u64 duration)
{
	struct ot_obj_stat total, normal = {0}, irq = {0};
	int cpu, nthreads = 0;

	pr_info("Testing summary for %s\n", test->name);

	for_each_possible_cpu(cpu) {
		struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);

		if (!item->duration)
			continue;
		normal.nhits += item->stat[0].nhits;
		normal.nmiss += item->stat[0].nmiss;
		irq.nhits += item->stat[1].nhits;
		irq.nmiss += item->stat[1].nmiss;
		pr_info("CPU: %d  duration: %lluus\n", cpu, item->duration);
		pr_info("\tthread:\t%16lu hits \t%16lu miss\n",
			item->stat[0].nhits, item->stat[0].nmiss);
		pr_info("\tirq:   \t%16lu hits \t%16lu miss\n",
			item->stat[1].nhits, item->stat[1].nmiss);
		pr_info("\ttotal: \t%16lu hits \t%16lu miss\n",
			item->stat[0].nhits + item->stat[1].nhits,
			item->stat[0].nmiss + item->stat[1].nmiss);
		nthreads++;
	}

	total.nhits = normal.nhits + irq.nhits;
	total.nmiss = normal.nmiss + irq.nmiss;

	pr_info("ALL: \tnthreads: %d  duration: %lluus\n", nthreads, duration);
	pr_info("SUM: \t%16lu hits \t%16lu miss\n",
		total.nhits, total.nmiss);

	test->data.objects = total;
	test->data.duration = duration;
}

/*
 * synchronous test cases for objpool manipulation
 */

/* objpool manipulation for synchronous mode (percpu objpool) */
static struct ot_context *ot_init_sync_m0(struct ot_test *test)
{
	struct ot_context *sop = NULL;
	int max = num_possible_cpus() << 3;
	gfp_t gfp = GFP_KERNEL;

	sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
	if (!sop)
		return NULL;
	sop->test = test;
	if (test->objsz < 512)
		gfp = GFP_ATOMIC;

	if (objpool_init(&sop->pool, max, test->objsz,
			 gfp, sop, ot_init_node, NULL)) {
		ot_kfree(test, sop, sizeof(*sop));
		return NULL;
	}
	WARN_ON(max != sop->pool.nr_objs);

	return sop;
}

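/*
 * In synchronous mode the control thread owns the pool lifetime: once all
 * workers have completed, ot_fini_sync() calls objpool_fini() and frees the
 * context directly. Compare with the asynchronous cases below, where the
 * final objpool_fini() is deferred to an RCU callback.
 */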
static void ot_fini_sync(struct ot_context *sop)
{
	objpool_fini(&sop->pool);
	ot_kfree(sop->test, sop, sizeof(*sop));
}

static struct {
	struct ot_context * (*init)(struct ot_test *oc);
	void (*fini)(struct ot_context *sop);
} g_ot_sync_ops[] = {
	{.init = ot_init_sync_m0, .fini = ot_fini_sync},
};

/*
 * synchronous test cases: performance mode
 */

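/*
 * One bulk round: pop up to bulk[irq] objects, optionally sleep for
 * item->delay ms, then push everything back. A successful pop counts as a
 * hit; a NULL pop (pool temporarily exhausted, e.g. in the overrun cases)
 * counts as a miss.
 */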
static void ot_bulk_sync(struct ot_item *item, int irq)
{
	struct ot_node *nods[OT_NR_MAX_BULK];
	int i;

	for (i = 0; i < item->bulk[irq]; i++)
		nods[i] = objpool_pop(item->pool);

	if (!irq && (item->delay || !(++(item->niters) & 0x7FFF)))
		msleep(item->delay);

	while (i-- > 0) {
		struct ot_node *on = nods[i];

		if (on) {
			objpool_push(on, item->pool);
			item->stat[irq].nhits++;
		} else {
			item->stat[irq].nmiss++;
		}
	}
}

static int ot_start_sync(struct ot_test *test)
{
	struct ot_context *sop;
	ktime_t start;
	u64 duration;
	unsigned long timeout;
	int cpu;

	/* initialize objpool for synchronous testcase */
	sop = g_ot_sync_ops[test->mode].init(test);
	if (!sop)
		return -ENOMEM;

	/* grab rwsem to block testing threads */
	down_write(&test->data.start);

	for_each_possible_cpu(cpu) {
		struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
		struct task_struct *work;

		ot_init_cpu_item(item, test, &sop->pool, ot_bulk_sync);

		/* skip offline cpus */
		if (!cpu_online(cpu))
			continue;

		work = kthread_create_on_node(ot_thread_worker, item,
				cpu_to_node(cpu), "ot_worker_%d", cpu);
		if (IS_ERR(work)) {
			pr_err("failed to create thread for cpu %d\n", cpu);
		} else {
			kthread_bind(work, cpu);
			wake_up_process(work);
		}
	}

	/* wait a while to make sure all threads are waiting at the start line */
	msleep(20);

	/* in case no threads were created (memory insufficient?) */
	if (atomic_dec_and_test(&test->data.nthreads))
		complete(&test->data.wait);

	// sched_set_fifo_low(current);

	/* start objpool testing threads */
	start = ktime_get();
	up_write(&test->data.start);

	/* yield cpu to worker threads for duration ms */
	timeout = msecs_to_jiffies(test->duration);
	schedule_timeout_interruptible(timeout);

	/* tell worker threads to quit */
	atomic_set_release(&test->data.stop, 1);

	/* wait for all worker threads to finish and quit */
	wait_for_completion(&test->data.wait);
	duration = (u64) ktime_us_delta(ktime_get(), start);

	/* cleanup objpool */
	g_ot_sync_ops[test->mode].fini(sop);

	/* report testing summary and performance results */
	ot_perf_report(test, duration);

	/* report memory allocation summary */
	ot_mem_report(test);

	return 0;
}

/*
 * asynchronous test cases: pool lifecycle controlled by refcount
 */

static void ot_fini_async_rcu(struct rcu_head *rcu)
{
	struct ot_context *sop = container_of(rcu, struct ot_context, rcu);
	struct ot_test *test = sop->test;

	/* here all cpus are aware of the stop event: test->data.stop = 1 */
	WARN_ON(!atomic_read_acquire(&test->data.stop));

	objpool_fini(&sop->pool);
	complete(&test->data.rcu);
}

static void ot_fini_async(struct ot_context *sop)
{
	/* make sure the stop event is acknowledged by all cores */
	call_rcu(&sop->rcu, ot_fini_async_rcu);
}

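/*
 * ot_objpool_release() is registered as the objpool release callback in
 * ot_init_async_m0(); objpool invokes it once the last reference to the
 * pool has been dropped, and only then is the user context freed.
 */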
static int ot_objpool_release(struct objpool_head *head, void *context)
{
	struct ot_context *sop = context;

	WARN_ON(!head || !sop || head != &sop->pool);

	/* do context cleaning if needed */
	if (sop)
		ot_kfree(sop->test, sop, sizeof(*sop));

	return 0;
}

static struct ot_context *ot_init_async_m0(struct ot_test *test)
{
	struct ot_context *sop = NULL;
	int max = num_possible_cpus() << 3;
	gfp_t gfp = GFP_KERNEL;

	sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
	if (!sop)
		return NULL;
	sop->test = test;
	if (test->objsz < 512)
		gfp = GFP_ATOMIC;

	if (objpool_init(&sop->pool, max, test->objsz, gfp, sop,
			 ot_init_node, ot_objpool_release)) {
		ot_kfree(test, sop, sizeof(*sop));
		return NULL;
	}
	WARN_ON(max != sop->pool.nr_objs);

	return sop;
}

static struct {
	struct ot_context * (*init)(struct ot_test *oc);
	void (*fini)(struct ot_context *sop);
} g_ot_async_ops[] = {
	{.init = ot_init_async_m0, .fini = ot_fini_async},
};

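/*
 * While the test is still running, popped objects are simply pushed back
 * for reuse. Once the stop flag has been observed, each object is dropped
 * instead, releasing the pool's references so the deferred finalization can
 * eventually free it.
 */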
static void ot_nod_recycle(struct ot_node *on, struct objpool_head *pool,
			int release)
{
	struct ot_context *sop;

	if (!release) {
		/* push object back to objpool for reuse */
		objpool_push(on, pool);
		return;
	}

	sop = container_of(pool, struct ot_context, pool);
	WARN_ON(sop != pool->context);

	/* unref objpool with this node removed forever */
	objpool_drop(on, pool);
}

static void ot_bulk_async(struct ot_item *item, int irq)
{
	struct ot_test *test = item->test;
	struct ot_node *nods[OT_NR_MAX_BULK];
	int i, stop;

	for (i = 0; i < item->bulk[irq]; i++)
		nods[i] = objpool_pop(item->pool);

	if (!irq) {
		if (item->delay || !(++(item->niters) & 0x7FFF))
			msleep(item->delay);
	}

	stop = atomic_read_acquire(&test->data.stop);

	/* drop all objects and deref objpool */
	while (i-- > 0) {
		struct ot_node *on = nods[i];

		if (on) {
			ot_nod_recycle(on, item->pool, stop);
			item->stat[irq].nhits++;
		} else {
			item->stat[irq].nmiss++;
		}
	}
}

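/*
 * Unlike ot_start_sync(), the async variant kicks off finalization (the RCU
 * callback) right after raising the stop flag and before waiting for the
 * workers; teardown completes only when the last pool reference is dropped,
 * which is why the test also waits on data.rcu before reporting.
 */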
static int ot_start_async(struct ot_test *test)
{
	struct ot_context *sop;
	ktime_t start;
	u64 duration;
	unsigned long timeout;
	int cpu;

	/* initialize objpool for asynchronous testcase */
	sop = g_ot_async_ops[test->mode].init(test);
	if (!sop)
		return -ENOMEM;

	/* grab rwsem to block testing threads */
	down_write(&test->data.start);

	for_each_possible_cpu(cpu) {
		struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
		struct task_struct *work;

		ot_init_cpu_item(item, test, &sop->pool, ot_bulk_async);

		/* skip offline cpus */
		if (!cpu_online(cpu))
			continue;

		work = kthread_create_on_node(ot_thread_worker, item,
				cpu_to_node(cpu), "ot_worker_%d", cpu);
		if (IS_ERR(work)) {
			pr_err("failed to create thread for cpu %d\n", cpu);
		} else {
			kthread_bind(work, cpu);
			wake_up_process(work);
		}
	}

	/* wait a while to make sure all threads are waiting at the start line */
	msleep(20);

	/* in case no threads were created (memory insufficient?) */
	if (atomic_dec_and_test(&test->data.nthreads))
		complete(&test->data.wait);

	/* start objpool testing threads */
	start = ktime_get();
	up_write(&test->data.start);

	/* yield cpu to worker threads for duration ms */
	timeout = msecs_to_jiffies(test->duration);
	schedule_timeout_interruptible(timeout);

	/* tell worker threads to quit */
	atomic_set_release(&test->data.stop, 1);

	/* do async-finalization */
	g_ot_async_ops[test->mode].fini(sop);

	/* wait for all worker threads to finish and quit */
	wait_for_completion(&test->data.wait);
	duration = (u64) ktime_us_delta(ktime_get(), start);

	/* make sure the rcu callback has completed */
	wait_for_completion(&test->data.rcu);

	/*
	 * now we are sure that objpool is finalized either
	 * by rcu callback or by worker threads
	 */

	/* report testing summary and performance results */
	ot_perf_report(test, duration);

	/* report memory allocation summary */
	ot_mem_report(test);

	return 0;
}

/*
 * predefined testing cases:
 *   synchronous case / overrun case / async case
 *
 * async: synchronous or asynchronous testing
 * mode: only mode 0 supported
 * objsz: object size
 * duration: int, total test time in ms
 * delay: int, delay (in ms) between each iteration
 * bulk_normal: int, repeat times for thread worker
 * bulk_irq: int, repeat times for irq consumer
 * hrtimer: unsigned long, hrtimer interval in ms
 * name: char *, tag for the current testcase
 */

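/*
 * Each entry below is a positional initializer of struct ot_test, in the
 * field order documented above:
 *   { async, mode, objsz, duration, delay, bulk_normal, bulk_irq, hrtimer, name }
 */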
#define NODE_COMPACT sizeof(struct ot_node)
#define NODE_VMALLOC (512)

static struct ot_test g_testcases[] = {

	{0, 0, NODE_COMPACT, 1000, 0,  1, 0, 0, "sync: percpu objpool"},
	{0, 0, NODE_VMALLOC, 1000, 0,  1, 0, 0, "sync: percpu objpool from vmalloc"},

	{0, 0, NODE_COMPACT, 1000, 0,  1, 1, 4, "sync & hrtimer: percpu objpool"},
	{0, 0, NODE_VMALLOC, 1000, 0,  1, 1, 4, "sync & hrtimer: percpu objpool from vmalloc"},

	{0, 0, NODE_COMPACT, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool"},
	{0, 0, NODE_VMALLOC, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool from vmalloc"},

	{1, 0, NODE_COMPACT, 1000, 100, 1, 0, 0, "async: percpu objpool"},
	{1, 0, NODE_VMALLOC, 1000, 100, 1, 0, 0, "async: percpu objpool from vmalloc"},

	/* async + hrtimer mode */
	{1, 0, NODE_COMPACT, 1000, 0,  4, 4, 4, "async & hrtimer: percpu objpool"},
	{1, 0, NODE_VMALLOC, 1000, 0,  4, 4, 4, "async & hrtimer: percpu objpool from vmalloc"},
};

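/*
 * All testcases run back to back at module load: per-cpu details are printed
 * as each case finishes, and a one-line summary per case is printed at the
 * end.
 */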
static int __init ot_mod_init(void)
{
	int i;

	/* perform the tests */
	for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
		ot_init_data(&g_testcases[i].data);
		if (g_testcases[i].async)
			ot_start_async(&g_testcases[i]);
		else
			ot_start_sync(&g_testcases[i]);
	}

	/* show tests summary */
	pr_info("Summary of testcases:\n");
	for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
		pr_info("    duration: %lluus \thits: %10lu \tmiss: %10lu \t%s\n",
			g_testcases[i].data.duration, g_testcases[i].data.objects.nhits,
			g_testcases[i].data.objects.nmiss, g_testcases[i].name);
	}

	return 0;
}

static void __exit ot_mod_exit(void)
{
}

module_init(ot_mod_init);
module_exit(ot_mod_exit);

MODULE_DESCRIPTION("Test module for lockless object pool");
MODULE_LICENSE("GPL");