// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/debug.h>

#include "rcu.h"
MODULE_DESCRIPTION("Read-Copy Update module-based scalability-test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)
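
/*
 * For example, with the default scale_type of "rcu",
 * SCALEOUT_STRING("Test complete") prints "rcu-scale: Test complete",
 * while SCALEOUT_ERRSTRING("out of memory") prints
 * "rcu-scale:!!! out of memory".
 */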

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, minruntime, 0, "Minimum run time (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");
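
/*
 * Example invocations (hypothetical command lines, shown only for
 * illustration): when built as a module, something like
 * "modprobe rcuscale scale_type=srcu nreaders=0 nwriters=4" runs an
 * SRCU update-only test with four writers; when built in, the
 * equivalent boot parameters would be "rcuscale.scale_type=srcu
 * rcuscale.nreaders=0 rcuscale.nwriters=4".
 */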

// Structure definitions for custom fixed-per-task allocator.
struct writer_mblock {
	struct rcu_head wmb_rh;
	struct llist_node wmb_node;
	struct writer_freelist *wmb_wfl;
};

struct writer_freelist {
	struct llist_head ws_lhg;
	atomic_t ws_inflight;
	struct llist_head ____cacheline_internodealigned_in_smp ws_lhp;
	struct writer_mblock *ws_mblocks;
};
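
/*
 * Each rcu_scale_writer task owns one writer_freelist.  The ->ws_lhp
 * list is private to the owning task, while ->ws_lhg is the global
 * list onto which callbacks possibly running elsewhere free their
 * blocks.  When ->ws_lhp runs dry, the owner refills it by moving the
 * entire ->ws_lhg list over with a single llist_del_all(), as can be
 * seen in rcu_scale_alloc() below.
 */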

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static bool *writer_done;
static struct writer_freelist *writer_freelists;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;

#define MAX_MEAS 10000
#define MIN_MEAS 100
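
/*
 * Each writer records at most MAX_MEAS per-grace-period latencies and
 * must accumulate at least MIN_MEAS of them (and run for at least
 * minruntime seconds) before declaring itself done.
 */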

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	struct task_struct *(*rso_gp_kthread)(void);
	void (*stats)(void);
	const char *name;
};

static struct rcu_scale_ops *cur_ops;
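
/*
 * The flavor under test is chosen at init time by matching the
 * scale_type module parameter against each ops vector's ->name.
 * All subsequent grace-period operations are dispatched through
 * cur_ops, for example cur_ops->sync() for a synchronous grace
 * period or cur_ops->async() to post a callback.
 */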

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused
rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= rcu_scale_read_lock,
	.readunlock	= rcu_scale_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu_hurry,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, scale_type, SCALE_FLAG);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.stats		= srcu_scale_stats,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_scale_init,
	.cleanup	= srcu_sync_scale_cleanup,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.stats		= srcu_scale_stats,
	.name		= "srcud"
};
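
/*
 * The "srcud" flavor differs from "srcu" only in exercising the
 * dynamic-initialization path: its ->init points srcu_ctlp at the
 * dynamically initialized srcud structure via init_srcu_struct(),
 * and its ->cleanup tears that structure down again.
 */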

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static void rcu_tasks_scale_stats(void)
{
	rcu_tasks_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_scale_read_lock,
	.readunlock	= tasks_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.rso_gp_kthread	= get_rcu_tasks_gp_kthread,
	.stats		= IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_scale_stats,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for RCU-tasks-rude scalability testing.
 */

static int tasks_rude_scale_read_lock(void)
{
	return 0;
}

static void tasks_rude_scale_read_unlock(int idx)
{
}

static void rcu_tasks_rude_scale_stats(void)
{
	rcu_tasks_rude_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_rude_ops = {
	.ptype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_rude_scale_read_lock,
	.readunlock	= tasks_rude_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.rso_gp_kthread	= get_rcu_tasks_rude_gp_kthread,
	.stats		= IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_rude_scale_stats,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_trace_scale_stats(void)
{
	rcu_tasks_trace_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_tracing_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_trace_scale_read_lock,
	.readunlock	= tasks_trace_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks_trace,
	.gp_barrier	= rcu_barrier_tasks_trace,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.rso_gp_kthread	= get_rcu_tasks_trace_gp_kthread,
	.stats		= IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_trace_scale_stats,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
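
/*
 * Because TASKS_OPS, TASKS_RUDE_OPS, and TASKS_TRACING_OPS expand to
 * nothing when the corresponding flavor is configured out, the
 * scale_ops[] array in rcu_scale_init() contains only those ops
 * vectors that the kernel configuration actually supports.
 */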

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does empty RCU read-side
 * critical sections, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Allocate a writer_mblock structure for the specified rcu_scale_writer
 * task.
 */
static struct writer_mblock *rcu_scale_alloc(long me)
{
	struct llist_node *llnp;
	struct writer_freelist *wflp;
	struct writer_mblock *wmbp;

	if (WARN_ON_ONCE(!writer_freelists))
		return NULL;
	wflp = &writer_freelists[me];
	if (llist_empty(&wflp->ws_lhp)) {
		// ->ws_lhp is private to its rcu_scale_writer task.
		wmbp = container_of(llist_del_all(&wflp->ws_lhg), struct writer_mblock, wmb_node);
		wflp->ws_lhp.first = &wmbp->wmb_node;
	}
	llnp = llist_del_first(&wflp->ws_lhp);
	if (!llnp)
		return NULL;
	return container_of(llnp, struct writer_mblock, wmb_node);
}

/*
 * Free a writer_mblock structure to its rcu_scale_writer task.
 */
static void rcu_scale_free(struct writer_mblock *wmbp)
{
	struct writer_freelist *wflp;

	if (!wmbp)
		return;
	wflp = wmbp->wmb_wfl;
	llist_add(&wmbp->wmb_node, &wflp->ws_lhg);
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	struct writer_mblock *wmbp = container_of(rhp, struct writer_mblock, wmb_rh);
	struct writer_freelist *wflp = wmbp->wmb_wfl;

	atomic_dec(&wflp->ws_inflight);
	rcu_scale_free(wmbp);
}
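
/*
 * Sketch of the asynchronous-grace-period accounting used below: each
 * posted callback increments its freelist's ->ws_inflight, as in
 *
 *	atomic_inc(&wflp->ws_inflight);
 *	cur_ops->async(&wmbp->wmb_rh, rcu_scale_async_cb);
 *
 * and rcu_scale_async_cb() decrements it again.  A writer finding
 * ->ws_inflight at gp_async_max falls back to cur_ops->gp_barrier()
 * to drain outstanding callbacks before posting more.
 */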

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	unsigned long jdone;
	long me = (long)arg;
	bool selfreport = false;
	bool started = false, done = false, alldone = false;
	u64 t;
	DEFINE_TORTURE_RANDOM(tr);
	u64 *wdp;
	u64 *wdpp = writer_durations[me];
	struct writer_freelist *wflp = &writer_freelists[me];
	struct writer_mblock *wmbp = NULL;

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	current->flags |= PF_NO_SETAFFINITY;
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_idle(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	jdone = jiffies + minruntime * HZ;
	do {
		bool gp_succeeded = false;

		if (writer_holdoff)
			udelay(writer_holdoff);
		if (writer_holdoff_jiffies)
			schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async && !WARN_ON_ONCE(!cur_ops->async)) {
			if (!wmbp)
				wmbp = rcu_scale_alloc(me);
			if (wmbp && atomic_read(&wflp->ws_inflight) < gp_async_max) {
				atomic_inc(&wflp->ws_inflight);
				cur_ops->async(&wmbp->wmb_rh, rcu_scale_async_cb);
				wmbp = NULL;
				gp_succeeded = true;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
			} else {
				rcu_scale_free(wmbp); /* Because we are stopping. */
				wmbp = NULL;
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
			gp_succeeded = true;
		} else {
			cur_ops->sync();
			gp_succeeded = true;
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
			done = true;
			WRITE_ONCE(writer_done[me], true);
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (done && !alldone && time_after(jiffies, jdone + HZ * 60)) {
			static atomic_t dumped;
			int i;

			if (!atomic_xchg(&dumped, 1)) {
				for (i = 0; i < nrealwriters; i++) {
					if (writer_done[i])
						continue;
					pr_info("%s: Task %ld flags writer %d:\n", __func__, me, i);
					sched_show_task(writer_tasks[i]);
				}
				if (cur_ops->stats)
					cur_ops->stats();
			}
		}
		if (!selfreport && time_after(jiffies, jdone + HZ * (70 + me))) {
			pr_info("%s: Writer %ld self-report: started %d done %d/%d->%d i %d jdone %lu.\n",
				__func__, me, started, done, writer_done[me], atomic_read(&n_rcu_scale_writer_finished), i, jiffies - jdone);
			selfreport = true;
		}
		if (gp_succeeded && started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async && cur_ops->async) {
		rcu_scale_free(wmbp);
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max + 1;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown);
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
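
/*
 * For example, on a system with eight online CPUs: compute_real(3)
 * returns 3, compute_real(-1) returns 8, and compute_real(-3) returns
 * 6 (that is, 8 + 1 + (-3)).  Results that would otherwise be zero or
 * negative are clamped to 1.
 */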

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs
 * for a given number of iterations, measuring the total time and the
 * number of grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?");
torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;
static struct task_struct *kthread_tp;
static u64 kthread_stime;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};

/* Used if doing RCU-kfree'ing via call_rcu(). */
static void kfree_call_rcu(struct rcu_head *rh)
{
	struct kfree_obj *obj = container_of(rh, struct kfree_obj, rh);

	kfree(obj);
}

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;
	bool kfree_rcu_test_both;
	DEFINE_TORTURE_RANDOM(tr);

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			if (kfree_by_call_rcu) {
				call_rcu(&(alloc_ptr->rh), kfree_call_rcu);
				continue;
			}

			// By default kfree_rcu_test_single and kfree_rcu_test_double are
			// initialized to false. If both have the same value (false or true)
			// both are randomly tested, otherwise only the one with value true
			// is tested.
			if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
			    (kfree_rcu_test_both && torture_random(&tr) & 0x800))
				kfree_rcu_mightsleep(alloc_ptr);
			else
				kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}
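
/*
 * Note on the memory-footprint arithmetic above: si_mem_available()
 * returns a page count, and shifting right by (20 - PAGE_SHIFT)
 * converts pages to mebibytes.  With 4 KiB pages (PAGE_SHIFT == 12),
 * that is a shift by 8, i.e. 256 pages per MB.
 */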

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
		kfree_reader_tasks = NULL;
	}

	torture_cleanup_end();
}

/*
 * Shutdown kthread.  Just waits to be awakened, then shuts down the system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq,
			atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

// Used if doing RCU-kfree'ing via call_rcu().
static unsigned long jiffies_at_lazy_cb;
static struct rcu_head lazy_test1_rh;
static int rcu_lazy_test1_cb_called;
static void call_rcu_lazy_test1(struct rcu_head *rh)
{
	jiffies_at_lazy_cb = jiffies;
	WRITE_ONCE(rcu_lazy_test1_cb_called, 1);
}

static int __init
kfree_scale_init(void)
{
	int firsterr = 0;
	long i;
	unsigned long jif_start;
	unsigned long orig_jif;

	pr_alert("%s" SCALE_FLAG
		 "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n",
		 scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single);

	// Also, do a quick self-test to ensure laziness is as much as
	// expected.
	if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) {
		pr_alert("CONFIG_RCU_LAZY is disabled, falling back to kfree_rcu() for delayed RCU kfree'ing\n");
		kfree_by_call_rcu = 0;
	}

	if (kfree_by_call_rcu) {
		/* do a test to check the timeout. */
		orig_jif = rcu_get_jiffies_lazy_flush();

		rcu_set_jiffies_lazy_flush(2 * HZ);
		rcu_barrier();

		jif_start = jiffies;
		jiffies_at_lazy_cb = 0;
		call_rcu(&lazy_test1_rh, call_rcu_lazy_test1);

		smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1);

		rcu_set_jiffies_lazy_flush(orig_jif);

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
			return -1;
		}

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
			return -1;
		}
	}

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu, kfree_by_call_rcu=%d\n",
		 kfree_mult * sizeof(struct kfree_obj),
		 kfree_by_call_rcu);

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	// If built-in, just report all of the GP kthread's CPU time.
	if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
		kthread_tp = cur_ops->rso_gp_kthread();
	if (kthread_tp) {
		u32 ns;
		u64 us;

		kthread_stime = kthread_tp->stime - kthread_stime;
		us = div_u64_rem(kthread_stime, 1000, &ns);
		pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
		show_rcu_gp_kthreads();
	}
	if (kfree_rcu_test) {
		kfree_scale_cleanup();
		return;
	}

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 scale_type, SCALE_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
			if (writer_freelists) {
				int ctr = 0;
				struct llist_node *llnp;
				struct writer_freelist *wflp = &writer_freelists[i];

				if (wflp->ws_mblocks) {
					llist_for_each(llnp, wflp->ws_lhg.first)
						ctr++;
					llist_for_each(llnp, wflp->ws_lhp.first)
						ctr++;
					WARN_ONCE(ctr != gp_async_max,
						  "%s: ctr = %d gp_async_max = %d\n",
						  __func__, ctr, gp_async_max);
					kfree(wflp->ws_mblocks);
				}
			}
		}
		kfree(writer_tasks);
		writer_tasks = NULL;
		kfree(writer_durations);
		writer_durations = NULL;
		kfree(writer_n_durations);
		writer_n_durations = NULL;
		kfree(writer_done);
		writer_done = NULL;
		kfree(writer_freelists);
		writer_freelists = NULL;
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down the system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_scale_init(void)
{
	long i;
	long j;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (cur_ops->rso_gp_kthread) {
		kthread_tp = cur_ops->rso_gp_kthread();
		if (kthread_tp)
			kthread_stime = kthread_tp->stime;
	}
	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]), GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL);
	writer_n_durations = kcalloc(nrealwriters, sizeof(*writer_n_durations), GFP_KERNEL);
	writer_done = kcalloc(nrealwriters, sizeof(writer_done[0]), GFP_KERNEL);
	if (gp_async) {
		if (gp_async_max <= 0) {
			pr_warn("%s: gp_async_max = %d must be greater than zero.\n",
				__func__, gp_async_max);
			WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
			firsterr = -EINVAL;
			goto unwind;
		}
		writer_freelists = kcalloc(nrealwriters, sizeof(writer_freelists[0]), GFP_KERNEL);
	}
	if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done ||
	    (gp_async && !writer_freelists)) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		if (writer_freelists) {
			struct writer_freelist *wflp = &writer_freelists[i];

			init_llist_head(&wflp->ws_lhg);
			init_llist_head(&wflp->ws_lhp);
			wflp->ws_mblocks = kcalloc(gp_async_max, sizeof(wflp->ws_mblocks[0]),
						   GFP_KERNEL);
			if (!wflp->ws_mblocks) {
				firsterr = -ENOMEM;
				goto unwind;
			}
			for (j = 0; j < gp_async_max; j++) {
				struct writer_mblock *wmbp = &wflp->ws_mblocks[j];

				wmbp->wmb_wfl = wflp;
				llist_add(&wmbp->wmb_node, &wflp->ws_lhp);
			}
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);
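
/*
 * A typical way to run this test is via the rcutorture scripting under
 * tools/testing/selftests/rcutorture (which, assuming current versions
 * of that scripting, supports an rcuscale torture type), but the module
 * can also be loaded directly with the parameters described above.
 */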