// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
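/*
 * Illustrative example (not used by the code below): a reader currently
 * inside cur_ops->readlock() with SRCU index 1 that has also disabled bh
 * carries the reader-state word
 *
 *	(1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_RCU | RCUTORTURE_RDR_BH
 *		= 0x100 | 0x20 | 0x01 = 0x121
 *
 * The low RCUTORTURE_RDR_MASK bits name the protections currently held,
 * and the bits above RCUTORTURE_RDR_SHIFT hold the index returned by the
 * flavor's readlock() function.
 */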
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
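/*
 * Illustrative sketch (not part of the test itself): all flavor-specific
 * operations are reached through cur_ops, so flavor-independent reader
 * code looks roughly like
 *
 *	idx = cur_ops->readlock();
 *	... access the RCU-protected rcu_torture_current structure ...
 *	cur_ops->readunlock(idx);
 *
 * with the actual functions supplied by rcu_ops, srcu_ops, and the other
 * operations vectors defined below.
 */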
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};
/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};
/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.irq_capable	= 1,
	.name		= "trivial"
};
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		return true; /* failed */
	}

	return false; /* passed */
}
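/*
 * Worked example (illustrative, assuming HZ=1000 and the default
 * test_boost_duration=4): a callback posted at the start of a boost
 * interval passes only if it is invoked within
 * test_boost_duration * HZ - HZ / 2 = 3500 jiffies, that is, within
 * 3.5 seconds of the 4-second interval.  Anything slower is counted
 * as a boost failure.
 */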
798 static int rcu_torture_boost(void *arg
)
800 unsigned long call_rcu_time
;
801 unsigned long endtime
;
802 unsigned long oldstarttime
;
803 struct rcu_boost_inflight rbi
= { .inflight
= 0 };
804 struct sched_param sp
;
806 VERBOSE_TOROUT_STRING("rcu_torture_boost started");
808 /* Set real-time priority. */
809 sp
.sched_priority
= 1;
810 if (sched_setscheduler(current
, SCHED_FIFO
, &sp
) < 0) {
811 VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
812 n_rcu_torture_boost_rterror
++;
815 init_rcu_head_on_stack(&rbi
.rcu
);
816 /* Each pass through the following loop does one boost-test cycle. */
818 /* Track if the test failed already in this test interval? */
821 /* Increment n_rcu_torture_boosts once per boost-test */
822 while (!kthread_should_stop()) {
823 if (mutex_trylock(&boost_mutex
)) {
824 n_rcu_torture_boosts
++;
825 mutex_unlock(&boost_mutex
);
828 schedule_timeout_uninterruptible(1);
830 if (kthread_should_stop())
833 /* Wait for the next test interval. */
834 oldstarttime
= boost_starttime
;
835 while (ULONG_CMP_LT(jiffies
, oldstarttime
)) {
836 schedule_timeout_interruptible(oldstarttime
- jiffies
);
837 stutter_wait("rcu_torture_boost");
838 if (torture_must_stop())
842 /* Do one boost-test interval. */
843 endtime
= oldstarttime
+ test_boost_duration
* HZ
;
844 call_rcu_time
= jiffies
;
845 while (ULONG_CMP_LT(jiffies
, endtime
)) {
846 /* If we don't have a callback in flight, post one. */
847 if (!smp_load_acquire(&rbi
.inflight
)) {
848 /* RCU core before ->inflight = 1. */
849 smp_store_release(&rbi
.inflight
, 1);
850 call_rcu(&rbi
.rcu
, rcu_torture_boost_cb
);
851 /* Check if the boost test failed */
853 rcu_torture_boost_failed(call_rcu_time
,
855 call_rcu_time
= jiffies
;
857 stutter_wait("rcu_torture_boost");
858 if (torture_must_stop())
863 * If boost never happened, then inflight will always be 1, in
864 * this case the boost check would never happen in the above
865 * loop so do another one here.
867 if (!failed
&& smp_load_acquire(&rbi
.inflight
))
868 rcu_torture_boost_failed(call_rcu_time
, jiffies
);
871 * Set the start time of the next test interval.
872 * Yes, this is vulnerable to long delays, but such
873 * delays simply cause a false negative for the next
874 * interval. Besides, we are running at RT priority,
875 * so delays should be relatively rare.
877 while (oldstarttime
== boost_starttime
&&
878 !kthread_should_stop()) {
879 if (mutex_trylock(&boost_mutex
)) {
880 boost_starttime
= jiffies
+
881 test_boost_interval
* HZ
;
882 mutex_unlock(&boost_mutex
);
885 schedule_timeout_uninterruptible(1);
888 /* Go do the stutter. */
889 checkwait
: stutter_wait("rcu_torture_boost");
890 } while (!torture_must_stop());
892 /* Clean up and exit. */
893 while (!kthread_should_stop() || smp_load_acquire(&rbi
.inflight
)) {
894 torture_shutdown_absorb("rcu_torture_boost");
895 schedule_timeout_uninterruptible(1);
897 destroy_rcu_head_on_stack(&rbi
.rcu
);
898 torture_kthread_stopping("rcu_torture_boost");
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
934 * RCU torture writer kthread. Repeatedly substitutes a new structure
935 * for that pointed to by rcu_torture_current, freeing the old structure
936 * after a series of grace periods (the "pipeline").
939 rcu_torture_writer(void *arg
)
941 bool can_expedite
= !rcu_gp_is_expedited() && !rcu_gp_is_normal();
943 unsigned long gp_snap
;
944 bool gp_cond1
= gp_cond
, gp_exp1
= gp_exp
, gp_normal1
= gp_normal
;
945 bool gp_sync1
= gp_sync
;
947 struct rcu_torture
*rp
;
948 struct rcu_torture
*old_rp
;
949 static DEFINE_TORTURE_RANDOM(rand
);
950 int synctype
[] = { RTWS_DEF_FREE
, RTWS_EXP_SYNC
,
951 RTWS_COND_GET
, RTWS_SYNC
};
954 VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
956 pr_alert("%s" TORTURE_FLAG
957 " GP expediting controlled from boot/sysfs for %s.\n",
958 torture_type
, cur_ops
->name
);
960 /* Initialize synctype[] array. If none set, take default. */
961 if (!gp_cond1
&& !gp_exp1
&& !gp_normal1
&& !gp_sync1
)
962 gp_cond1
= gp_exp1
= gp_normal1
= gp_sync1
= true;
963 if (gp_cond1
&& cur_ops
->get_state
&& cur_ops
->cond_sync
) {
964 synctype
[nsynctypes
++] = RTWS_COND_GET
;
965 pr_info("%s: Testing conditional GPs.\n", __func__
);
966 } else if (gp_cond
&& (!cur_ops
->get_state
|| !cur_ops
->cond_sync
)) {
967 pr_alert("%s: gp_cond without primitives.\n", __func__
);
969 if (gp_exp1
&& cur_ops
->exp_sync
) {
970 synctype
[nsynctypes
++] = RTWS_EXP_SYNC
;
971 pr_info("%s: Testing expedited GPs.\n", __func__
);
972 } else if (gp_exp
&& !cur_ops
->exp_sync
) {
973 pr_alert("%s: gp_exp without primitives.\n", __func__
);
975 if (gp_normal1
&& cur_ops
->deferred_free
) {
976 synctype
[nsynctypes
++] = RTWS_DEF_FREE
;
977 pr_info("%s: Testing asynchronous GPs.\n", __func__
);
978 } else if (gp_normal
&& !cur_ops
->deferred_free
) {
979 pr_alert("%s: gp_normal without primitives.\n", __func__
);
981 if (gp_sync1
&& cur_ops
->sync
) {
982 synctype
[nsynctypes
++] = RTWS_SYNC
;
983 pr_info("%s: Testing normal GPs.\n", __func__
);
984 } else if (gp_sync
&& !cur_ops
->sync
) {
985 pr_alert("%s: gp_sync without primitives.\n", __func__
);
987 if (WARN_ONCE(nsynctypes
== 0,
988 "rcu_torture_writer: No update-side primitives.\n")) {
990 * No updates primitives, so don't try updating.
991 * The resulting test won't be testing much, hence the
994 rcu_torture_writer_state
= RTWS_STOPPING
;
995 torture_kthread_stopping("rcu_torture_writer");
999 rcu_torture_writer_state
= RTWS_FIXED_DELAY
;
1000 schedule_timeout_uninterruptible(1);
1001 rp
= rcu_torture_alloc();
1004 rp
->rtort_pipe_count
= 0;
1005 rcu_torture_writer_state
= RTWS_DELAY
;
1006 udelay(torture_random(&rand
) & 0x3ff);
1007 rcu_torture_writer_state
= RTWS_REPLACE
;
1008 old_rp
= rcu_dereference_check(rcu_torture_current
,
1009 current
== writer_task
);
1010 rp
->rtort_mbtest
= 1;
1011 rcu_assign_pointer(rcu_torture_current
, rp
);
1012 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1014 i
= old_rp
->rtort_pipe_count
;
1015 if (i
> RCU_TORTURE_PIPE_LEN
)
1016 i
= RCU_TORTURE_PIPE_LEN
;
1017 atomic_inc(&rcu_torture_wcount
[i
]);
1018 old_rp
->rtort_pipe_count
++;
1019 switch (synctype
[torture_random(&rand
) % nsynctypes
]) {
1021 rcu_torture_writer_state
= RTWS_DEF_FREE
;
1022 cur_ops
->deferred_free(old_rp
);
1025 rcu_torture_writer_state
= RTWS_EXP_SYNC
;
1026 cur_ops
->exp_sync();
1027 rcu_torture_pipe_update(old_rp
);
1030 rcu_torture_writer_state
= RTWS_COND_GET
;
1031 gp_snap
= cur_ops
->get_state();
1032 i
= torture_random(&rand
) % 16;
1034 schedule_timeout_interruptible(i
);
1035 udelay(torture_random(&rand
) % 1000);
1036 rcu_torture_writer_state
= RTWS_COND_SYNC
;
1037 cur_ops
->cond_sync(gp_snap
);
1038 rcu_torture_pipe_update(old_rp
);
1041 rcu_torture_writer_state
= RTWS_SYNC
;
1043 rcu_torture_pipe_update(old_rp
);
1050 WRITE_ONCE(rcu_torture_current_version
,
1051 rcu_torture_current_version
+ 1);
1052 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1054 !(torture_random(&rand
) & 0xff & (!!expediting
- 1))) {
1055 WARN_ON_ONCE(expediting
== 0 && rcu_gp_is_expedited());
1056 if (expediting
>= 0)
1059 rcu_unexpedite_gp();
1060 if (++expediting
> 3)
1061 expediting
= -expediting
;
1062 } else if (!can_expedite
) { /* Disabled during boot, recheck. */
1063 can_expedite
= !rcu_gp_is_expedited() &&
1064 !rcu_gp_is_normal();
1066 rcu_torture_writer_state
= RTWS_STUTTER
;
1067 if (stutter_wait("rcu_torture_writer") &&
1068 !READ_ONCE(rcu_fwd_cb_nodelay
) &&
1069 !cur_ops
->slow_gps
&&
1070 !torture_must_stop())
1071 for (i
= 0; i
< ARRAY_SIZE(rcu_tortures
); i
++)
1072 if (list_empty(&rcu_tortures
[i
].rtort_free
) &&
1073 rcu_access_pointer(rcu_torture_current
) !=
1075 rcu_ftrace_dump(DUMP_ALL
);
1076 WARN(1, "%s: rtort_pipe_count: %d\n", __func__
, rcu_tortures
[i
].rtort_pipe_count
);
1078 } while (!torture_must_stop());
1079 /* Reset expediting back to unexpedited. */
1081 expediting
= -expediting
;
1082 while (can_expedite
&& expediting
++ < 0)
1083 rcu_unexpedite_gp();
1084 WARN_ON_ONCE(can_expedite
&& rcu_gp_is_expedited());
1086 pr_alert("%s" TORTURE_FLAG
1087 " Dynamic grace-period expediting was disabled.\n",
1089 rcu_torture_writer_state
= RTWS_STOPPING
;
1090 torture_kthread_stopping("rcu_torture_writer");
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning or end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}
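/*
 * Illustrative example: suppose the random selection yields
 * mask = RCUTORTURE_RDR_IRQ (0x02) while the old reader state still holds
 * RCUTORTURE_RDR_BH (0x01).  Because bh cannot be re-enabled while
 * interrupts are disabled, the check above widens the new mask to keep
 * holding BH and RBH as well: 0x02 | 0x01 | 0x08 = 0x0b.
 */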
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
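/*
 * Worked example of the bias above: the two ORed 3-bit fields are ANDed
 * with RCUTORTURE_RDR_MAX_LOOPS (0x7) and incremented, so the loop count
 * always lies between 1 and RCUTORTURE_RDR_MAX_LOOPS + 1 = 8, and because
 * the OR of two random 3-bit values is rarely zero, it is skewed toward
 * the high end of that range.
 */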
1257 * Do one read-side critical section, returning false if there was
1258 * no data to read. Can be invoked both from process context and
1259 * from a timer handler.
1261 static bool rcu_torture_one_read(struct torture_random_state
*trsp
)
1264 unsigned long started
;
1265 unsigned long completed
;
1267 struct rcu_torture
*p
;
1270 struct rt_read_seg rtseg
[RCUTORTURE_RDR_MAX_SEGS
] = { { 0 } };
1271 struct rt_read_seg
*rtrsp
= &rtseg
[0];
1272 struct rt_read_seg
*rtrsp1
;
1273 unsigned long long ts
;
1275 newstate
= rcutorture_extend_mask(readstate
, trsp
);
1276 rcutorture_one_extend(&readstate
, newstate
, trsp
, rtrsp
++);
1277 started
= cur_ops
->get_gp_seq();
1278 ts
= rcu_trace_clock_local();
1279 p
= rcu_dereference_check(rcu_torture_current
,
1280 rcu_read_lock_bh_held() ||
1281 rcu_read_lock_sched_held() ||
1282 srcu_read_lock_held(srcu_ctlp
) ||
1285 /* Wait for rcu_torture_writer to get underway */
1286 rcutorture_one_extend(&readstate
, 0, trsp
, rtrsp
);
1289 if (p
->rtort_mbtest
== 0)
1290 atomic_inc(&n_rcu_torture_mberror
);
1291 rtrsp
= rcutorture_loop_extend(&readstate
, trsp
, rtrsp
);
1293 pipe_count
= p
->rtort_pipe_count
;
1294 if (pipe_count
> RCU_TORTURE_PIPE_LEN
) {
1295 /* Should not happen, but... */
1296 pipe_count
= RCU_TORTURE_PIPE_LEN
;
1298 completed
= cur_ops
->get_gp_seq();
1299 if (pipe_count
> 1) {
1300 do_trace_rcu_torture_read(cur_ops
->name
, &p
->rtort_rcu
,
1301 ts
, started
, completed
);
1302 rcu_ftrace_dump(DUMP_ALL
);
1304 __this_cpu_inc(rcu_torture_count
[pipe_count
]);
1305 completed
= rcutorture_seq_diff(completed
, started
);
1306 if (completed
> RCU_TORTURE_PIPE_LEN
) {
1307 /* Should not happen, but... */
1308 completed
= RCU_TORTURE_PIPE_LEN
;
1310 __this_cpu_inc(rcu_torture_batch
[completed
]);
1312 rcutorture_one_extend(&readstate
, 0, trsp
, rtrsp
);
1313 WARN_ON_ONCE(readstate
& RCUTORTURE_RDR_MASK
);
1315 /* If error or close call, record the sequence of reader protections. */
1316 if ((pipe_count
> 1 || completed
> 1) && !xchg(&err_segs_recorded
, 1)) {
1318 for (rtrsp1
= &rtseg
[0]; rtrsp1
< rtrsp
; rtrsp1
++)
1319 err_segs
[i
++] = *rtrsp1
;
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
1349 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1350 * incrementing the corresponding element of the pipeline array. The
1351 * counter in the element should never be greater than 1, otherwise, the
1352 * RCU implementation is broken.
1355 rcu_torture_reader(void *arg
)
1357 unsigned long lastsleep
= jiffies
;
1358 long myid
= (long)arg
;
1359 int mynumonline
= myid
;
1360 DEFINE_TORTURE_RANDOM(rand
);
1361 struct timer_list t
;
1363 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1364 set_user_nice(current
, MAX_NICE
);
1365 if (irqreader
&& cur_ops
->irq_capable
)
1366 timer_setup_on_stack(&t
, rcu_torture_timer
, 0);
1367 tick_dep_set_task(current
, TICK_DEP_BIT_RCU
);
1369 if (irqreader
&& cur_ops
->irq_capable
) {
1370 if (!timer_pending(&t
))
1371 mod_timer(&t
, jiffies
+ 1);
1373 if (!rcu_torture_one_read(&rand
) && !torture_must_stop())
1374 schedule_timeout_interruptible(HZ
);
1375 if (time_after(jiffies
, lastsleep
) && !torture_must_stop()) {
1376 schedule_timeout_interruptible(1);
1377 lastsleep
= jiffies
+ 10;
1379 while (num_online_cpus() < mynumonline
&& !torture_must_stop())
1380 schedule_timeout_interruptible(HZ
/ 5);
1381 stutter_wait("rcu_torture_reader");
1382 } while (!torture_must_stop());
1383 if (irqreader
&& cur_ops
->irq_capable
) {
1385 destroy_timer_on_stack(&t
);
1387 tick_dep_clear_task(current
, TICK_DEP_BIT_RCU
);
1388 torture_kthread_stopping("rcu_torture_reader");
1393 * Print torture statistics. Caller must ensure that there is only
1394 * one call to this function at a given time!!! This is normally
1395 * accomplished by relying on the module system to only have one copy
1396 * of the module loaded, and then by giving the rcu_torture_stats
1397 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1398 * thread is not running).
1401 rcu_torture_stats_print(void)
1405 long pipesummary
[RCU_TORTURE_PIPE_LEN
+ 1] = { 0 };
1406 long batchsummary
[RCU_TORTURE_PIPE_LEN
+ 1] = { 0 };
1407 static unsigned long rtcv_snap
= ULONG_MAX
;
1408 static bool splatted
;
1409 struct task_struct
*wtp
;
1411 for_each_possible_cpu(cpu
) {
1412 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++) {
1413 pipesummary
[i
] += per_cpu(rcu_torture_count
, cpu
)[i
];
1414 batchsummary
[i
] += per_cpu(rcu_torture_batch
, cpu
)[i
];
1417 for (i
= RCU_TORTURE_PIPE_LEN
- 1; i
>= 0; i
--) {
1418 if (pipesummary
[i
] != 0)
1422 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1423 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1424 rcu_torture_current
,
1425 rcu_torture_current
? "ver" : "VER",
1426 rcu_torture_current_version
,
1427 list_empty(&rcu_torture_freelist
),
1428 atomic_read(&n_rcu_torture_alloc
),
1429 atomic_read(&n_rcu_torture_alloc_fail
),
1430 atomic_read(&n_rcu_torture_free
));
1431 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
1432 atomic_read(&n_rcu_torture_mberror
),
1433 n_rcu_torture_barrier_error
,
1434 n_rcu_torture_boost_ktrerror
,
1435 n_rcu_torture_boost_rterror
);
1436 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1437 n_rcu_torture_boost_failure
,
1438 n_rcu_torture_boosts
,
1439 atomic_long_read(&n_rcu_torture_timers
));
1440 torture_onoff_stats();
1441 pr_cont("barrier: %ld/%ld:%ld\n",
1442 n_barrier_successes
,
1444 n_rcu_torture_barrier_error
);
1446 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1447 if (atomic_read(&n_rcu_torture_mberror
) ||
1448 n_rcu_torture_barrier_error
|| n_rcu_torture_boost_ktrerror
||
1449 n_rcu_torture_boost_rterror
|| n_rcu_torture_boost_failure
||
1451 pr_cont("%s", "!!! ");
1452 atomic_inc(&n_rcu_torture_error
);
1453 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror
));
1454 WARN_ON_ONCE(n_rcu_torture_barrier_error
); // rcu_barrier()
1455 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror
); // no boost kthread
1456 WARN_ON_ONCE(n_rcu_torture_boost_rterror
); // can't set RT prio
1457 WARN_ON_ONCE(n_rcu_torture_boost_failure
); // RCU boost failed
1458 WARN_ON_ONCE(i
> 1); // Too-short grace period
1460 pr_cont("Reader Pipe: ");
1461 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++)
1462 pr_cont(" %ld", pipesummary
[i
]);
1465 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1466 pr_cont("Reader Batch: ");
1467 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++)
1468 pr_cont(" %ld", batchsummary
[i
]);
1471 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1472 pr_cont("Free-Block Circulation: ");
1473 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++) {
1474 pr_cont(" %d", atomic_read(&rcu_torture_wcount
[i
]));
1480 if (rtcv_snap
== rcu_torture_current_version
&&
1481 rcu_torture_current
!= NULL
) {
1482 int __maybe_unused flags
= 0;
1483 unsigned long __maybe_unused gp_seq
= 0;
1485 rcutorture_get_gp_data(cur_ops
->ttype
,
1487 srcutorture_get_gp_data(cur_ops
->ttype
, srcu_ctlp
,
1489 wtp
= READ_ONCE(writer_task
);
1490 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
1491 rcu_torture_writer_state_getname(),
1492 rcu_torture_writer_state
, gp_seq
, flags
,
1493 wtp
== NULL
? ~0UL : wtp
->state
,
1494 wtp
== NULL
? -1 : (int)task_cpu(wtp
));
1495 if (!splatted
&& wtp
) {
1496 sched_show_task(wtp
);
1499 show_rcu_gp_kthreads();
1500 rcu_ftrace_dump(DUMP_ALL
);
1502 rtcv_snap
= rcu_torture_current_version
;
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
1523 rcu_torture_print_module_parms(struct rcu_torture_ops
*cur_ops
, const char *tag
)
1525 pr_alert("%s" TORTURE_FLAG
1526 "--- %s: nreaders=%d nfakewriters=%d "
1527 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1528 "shuffle_interval=%d stutter=%d irqreader=%d "
1529 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1530 "test_boost=%d/%d test_boost_interval=%d "
1531 "test_boost_duration=%d shutdown_secs=%d "
1532 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1534 "onoff_interval=%d onoff_holdoff=%d\n",
1535 torture_type
, tag
, nrealreaders
, nfakewriters
,
1536 stat_interval
, verbose
, test_no_idle_hz
, shuffle_interval
,
1537 stutter
, irqreader
, fqs_duration
, fqs_holdoff
, fqs_stutter
,
1538 test_boost
, cur_ops
->can_boost
,
1539 test_boost_interval
, test_boost_duration
, shutdown_secs
,
1540 stall_cpu
, stall_cpu_holdoff
, stall_cpu_irqsoff
,
1542 onoff_interval
, onoff_holdoff
);
1545 static int rcutorture_booster_cleanup(unsigned int cpu
)
1547 struct task_struct
*t
;
1549 if (boost_tasks
[cpu
] == NULL
)
1551 mutex_lock(&boost_mutex
);
1552 t
= boost_tasks
[cpu
];
1553 boost_tasks
[cpu
] = NULL
;
1554 rcu_torture_enable_rt_throttle();
1555 mutex_unlock(&boost_mutex
);
1557 /* This must be outside of the mutex, otherwise deadlock! */
1558 torture_stop_kthread(rcu_torture_boost
, t
);
1562 static int rcutorture_booster_init(unsigned int cpu
)
1566 if (boost_tasks
[cpu
] != NULL
)
1567 return 0; /* Already created, nothing more to do. */
1569 /* Don't allow time recalculation while creating a new task. */
1570 mutex_lock(&boost_mutex
);
1571 rcu_torture_disable_rt_throttle();
1572 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1573 boost_tasks
[cpu
] = kthread_create_on_node(rcu_torture_boost
, NULL
,
1575 "rcu_torture_boost");
1576 if (IS_ERR(boost_tasks
[cpu
])) {
1577 retval
= PTR_ERR(boost_tasks
[cpu
]);
1578 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1579 n_rcu_torture_boost_ktrerror
++;
1580 boost_tasks
[cpu
] = NULL
;
1581 mutex_unlock(&boost_mutex
);
1584 kthread_bind(boost_tasks
[cpu
], cpu
);
1585 wake_up_process(boost_tasks
[cpu
]);
1586 mutex_unlock(&boost_mutex
);
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	struct rcu_fwd *rfc_rfp;
	int rfc_gps;
};

#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
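/*
 * Worked example: with the definitions above and HZ divisible by
 * FWD_CBS_HIST_DIV, N_LAUNDERS_HIST = 2 * (8 * HZ) / (HZ / 10) = 160
 * histogram buckets, each covering a tenth of a second, or twice the
 * MAX_FWD_CB_JIFFIES test duration's worth of buckets.
 */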
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};

struct rcu_fwd {
	spinlock_t rcu_fwd_lock;
	struct rcu_fwd_cb *rcu_fwd_cb_head;
	struct rcu_fwd_cb **rcu_fwd_cb_tail;
	long n_launders_cb;
	unsigned long rcu_fwd_startat;
	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
	unsigned long rcu_launder_gp_seq_start;
};

static struct rcu_fwd *rcu_fwds;
static bool rcu_fwd_emergency_stop;
1692 static void rcu_torture_fwd_cb_hist(struct rcu_fwd
*rfp
)
1695 unsigned long gps_old
;
1699 for (i
= ARRAY_SIZE(rfp
->n_launders_hist
) - 1; i
> 0; i
--)
1700 if (rfp
->n_launders_hist
[i
].n_launders
> 0)
1702 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
1703 __func__
, jiffies
- rfp
->rcu_fwd_startat
);
1704 gps_old
= rfp
->rcu_launder_gp_seq_start
;
1705 for (j
= 0; j
<= i
; j
++) {
1706 gps
= rfp
->n_launders_hist
[j
].launder_gp_seq
;
1707 pr_cont(" %ds/%d: %ld:%ld",
1708 j
+ 1, FWD_CBS_HIST_DIV
,
1709 rfp
->n_launders_hist
[j
].n_launders
,
1710 rcutorture_seq_diff(gps
, gps_old
));
1716 /* Callback function for continuous-flood RCU callbacks. */
1717 static void rcu_torture_fwd_cb_cr(struct rcu_head
*rhp
)
1719 unsigned long flags
;
1721 struct rcu_fwd_cb
*rfcp
= container_of(rhp
, struct rcu_fwd_cb
, rh
);
1722 struct rcu_fwd_cb
**rfcpp
;
1723 struct rcu_fwd
*rfp
= rfcp
->rfc_rfp
;
1725 rfcp
->rfc_next
= NULL
;
1727 spin_lock_irqsave(&rfp
->rcu_fwd_lock
, flags
);
1728 rfcpp
= rfp
->rcu_fwd_cb_tail
;
1729 rfp
->rcu_fwd_cb_tail
= &rfcp
->rfc_next
;
1730 WRITE_ONCE(*rfcpp
, rfcp
);
1731 WRITE_ONCE(rfp
->n_launders_cb
, rfp
->n_launders_cb
+ 1);
1732 i
= ((jiffies
- rfp
->rcu_fwd_startat
) / (HZ
/ FWD_CBS_HIST_DIV
));
1733 if (i
>= ARRAY_SIZE(rfp
->n_launders_hist
))
1734 i
= ARRAY_SIZE(rfp
->n_launders_hist
) - 1;
1735 rfp
->n_launders_hist
[i
].n_launders
++;
1736 rfp
->n_launders_hist
[i
].launder_gp_seq
= cur_ops
->get_gp_seq();
1737 spin_unlock_irqrestore(&rfp
->rcu_fwd_lock
, flags
);
// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			udelay(10);
	} else {
		// No userspace emulation: CB invocation throttles call_rcu()
		cond_resched();
	}
}
1754 * Free all callbacks on the rcu_fwd_cb_head list, either because the
1755 * test is over or because we hit an OOM event.
1757 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd
*rfp
)
1759 unsigned long flags
;
1760 unsigned long freed
= 0;
1761 struct rcu_fwd_cb
*rfcp
;
1764 spin_lock_irqsave(&rfp
->rcu_fwd_lock
, flags
);
1765 rfcp
= rfp
->rcu_fwd_cb_head
;
1767 spin_unlock_irqrestore(&rfp
->rcu_fwd_lock
, flags
);
1770 rfp
->rcu_fwd_cb_head
= rfcp
->rfc_next
;
1771 if (!rfp
->rcu_fwd_cb_head
)
1772 rfp
->rcu_fwd_cb_tail
= &rfp
->rcu_fwd_cb_head
;
1773 spin_unlock_irqrestore(&rfp
->rcu_fwd_lock
, flags
);
1776 rcu_torture_fwd_prog_cond_resched(freed
);
1777 if (tick_nohz_full_enabled()) {
1778 local_irq_save(flags
);
1779 rcu_momentary_dyntick_idle();
1780 local_irq_restore(flags
);
1786 /* Carry out need_resched()/cond_resched() forward-progress testing. */
1787 static void rcu_torture_fwd_prog_nr(struct rcu_fwd
*rfp
,
1788 int *tested
, int *tested_tries
)
1792 struct fwd_cb_state fcs
;
1797 bool selfpropcb
= false;
1798 unsigned long stopat
;
1799 static DEFINE_TORTURE_RANDOM(trs
);
1801 if (cur_ops
->call
&& cur_ops
->sync
&& cur_ops
->cb_barrier
) {
1802 init_rcu_head_on_stack(&fcs
.rh
);
1806 /* Tight loop containing cond_resched(). */
1807 WRITE_ONCE(rcu_fwd_cb_nodelay
, true);
1808 cur_ops
->sync(); /* Later readers see above write. */
1810 WRITE_ONCE(fcs
.stop
, 0);
1811 cur_ops
->call(&fcs
.rh
, rcu_torture_fwd_prog_cb
);
1813 cver
= READ_ONCE(rcu_torture_current_version
);
1814 gps
= cur_ops
->get_gp_seq();
1815 sd
= cur_ops
->stall_dur() + 1;
1816 sd4
= (sd
+ fwd_progress_div
- 1) / fwd_progress_div
;
1817 dur
= sd4
+ torture_random(&trs
) % (sd
- sd4
);
1818 WRITE_ONCE(rfp
->rcu_fwd_startat
, jiffies
);
1819 stopat
= rfp
->rcu_fwd_startat
+ dur
;
1820 while (time_before(jiffies
, stopat
) &&
1821 !shutdown_time_arrived() &&
1822 !READ_ONCE(rcu_fwd_emergency_stop
) && !torture_must_stop()) {
1823 idx
= cur_ops
->readlock();
1825 cur_ops
->readunlock(idx
);
1826 if (!fwd_progress_need_resched
|| need_resched())
1830 if (!time_before(jiffies
, stopat
) &&
1831 !shutdown_time_arrived() &&
1832 !READ_ONCE(rcu_fwd_emergency_stop
) && !torture_must_stop()) {
1834 cver
= READ_ONCE(rcu_torture_current_version
) - cver
;
1835 gps
= rcutorture_seq_diff(cur_ops
->get_gp_seq(), gps
);
1836 WARN_ON(!cver
&& gps
< 2);
1837 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__
, dur
, cver
, gps
);
1840 WRITE_ONCE(fcs
.stop
, 1);
1841 cur_ops
->sync(); /* Wait for running CB to complete. */
1842 cur_ops
->cb_barrier(); /* Wait for queued callbacks. */
1846 WARN_ON(READ_ONCE(fcs
.stop
) != 2);
1847 destroy_rcu_head_on_stack(&fcs
.rh
);
1849 schedule_timeout_uninterruptible(HZ
/ 10); /* Let kthreads recover. */
1850 WRITE_ONCE(rcu_fwd_cb_nodelay
, false);
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist(rfp);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
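
/*
 * Note that rcu_fwd_emergency_stop, which is set by the OOM notifier
 * below, is polled by both forward-progress loops above, so an OOM
 * event both cuts the current test interval short and allows the
 * flooded callbacks to be freed promptly.
 */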
/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	struct rcu_fwd *rfp = rcu_fwds;

	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist(rfp);
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
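
/*
 * The forward-progress kthread below registers the OOM notifier only
 * for the duration of each test interval, so the OOM diagnostics above
 * are emitted only while a forward-progress test is actually running.
 */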
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
		rcu_torture_fwd_prog_cr(rfp);
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}
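
/*
 * Illustrative example (not the only way to configure this): when
 * rcutorture is built as a module, forward-progress testing can be
 * tuned at load time, for instance:
 *
 *	modprobe rcutorture fwd_progress=1 fwd_progress_holdoff=60 fwd_progress_div=4
 *
 * The init function below applies the same sanity limits regardless of
 * how the parameters were set.
 */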
/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
	if (!rfp)
		return -ENOMEM;
	spin_lock_init(&rfp->rcu_fwd_lock);
	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
}
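
/*
 * RCU barrier testing below uses one rcu_torture_barrier_cbs() kthread
 * per requested callback plus a single rcu_torture_barrier() driver.
 * Each phase, the driver flips barrier_phase, each cbs kthread posts
 * one callback, and the driver then invokes ->cb_barrier() and checks
 * that every posted callback was in fact invoked.
 */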
/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
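
/*
 * The smp_store_release() of barrier_phase in the driver below pairs
 * with the smp_load_acquire() in rcu_torture_barrier_cbs() above,
 * ensuring that the per-phase counters are initialized before any cbs
 * kthread observes the new phase and posts its callback.
 */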
/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
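
/*
 * Illustrative only: barrier testing is enabled by setting n_barrier_cbs
 * to a positive value, for example:
 *
 *	modprobe rcutorture n_barrier_cbs=4
 *
 * With n_barrier_cbs <= 0, rcu_torture_barrier_init() below returns
 * without spawning any barrier-test kthreads.
 */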
/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}
/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
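
/*
 * Boost testing runs only when a boost-capable flavor is selected
 * (test_boost=1 with a ->can_boost flavor, or test_boost=2 to force it)
 * and the RCU grace-period kthreads run at a realtime priority of at
 * least 2, for example via the rcutree.kthread_prio boot parameter.
 */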
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;
static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	show_rcu_gp_kthreads();
	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}
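
/*
 * The debug-objects test below deliberately passes the same rcu_head to
 * call_rcu() twice in quick succession.  With
 * CONFIG_DEBUG_OBJECTS_RCU_HEAD=y this should provoke a debug-objects
 * splat; without it, the test merely announces that it cannot be run.
 */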
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
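
/*
 * Module/boot-time initialization: select the ops vector matching the
 * torture_type parameter, initialize the freelist and statistics, then
 * spawn the writer, fakewriter, reader, stats, fqs, boost, stall,
 * forward-progress, and barrier facilities in that order.  Any failure
 * branches to the unwind path, which runs rcu_torture_cleanup().
 */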
static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops, &trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);