/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
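
/*
 * Example (editor's illustration, not used by the test): a reader state of
 *
 *	(1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_RCU | RCUTORTURE_RDR_BH
 *
 * encodes an SRCU read-side index of 1 in the bits above RCUTORTURE_RDR_SHIFT,
 * while the low-order RCUTORTURE_RDR_MASK bits record that the reader is
 * inside the flavor's read-side primitive and additionally has bh disabled.
 */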
torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
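
/*
 * For example (editor's note, not part of this file's logic), the flavor
 * under test and a few of the parameters above would typically be selected
 * at module-load time along the lines of:
 *
 *	modprobe rcutorture torture_type=srcu stat_interval=15 nreaders=8
 *
 * or, for a built-in rcutorture, via "rcutorture.torture_type=srcu" and
 * friends on the kernel boot command line.
 */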
static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};

static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
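
/*
 * Illustrative pairing (editor's sketch, not part of the test proper): the
 * writer obtains elements with rcu_torture_alloc() and eventually returns
 * them with rcu_torture_free() once no reader can still hold a reference:
 *
 *	struct rcu_torture *p = rcu_torture_alloc();
 *
 *	if (p) {
 *		p->rtort_pipe_count = 0;
 *		... publish p, then wait for grace periods ...
 *		rcu_torture_free(p);
 *	}
 */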
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
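
/*
 * Editor's sketch of how the vector above is consumed (the real users are
 * the reader and writer kthreads later in this file): a flavor-agnostic
 * read-side critical section is expressed purely through cur_ops, e.g.
 *
 *	int idx = cur_ops->readlock();
 *
 *	... dereference and check rcu_torture_current ...
 *	cur_ops->readunlock(idx);
 *
 * so that the same test logic covers rcu, srcu, tasks, and the deliberately
 * broken flavors.
 */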
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}
static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
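
/*
 * Rough numbers for the branches above (editor's illustration): with the
 * defaults of longdelay_ms = 300 and shortdelay_us = 200, an individual call
 * takes the 300 ms path with probability 1/(nrealreaders * 600000) and the
 * 200 us path with probability 1/(nrealreaders * 400), so long delays stay
 * rare enough not to starve the test while still occasionally provoking
 * force_quiescent_state().
 */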
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
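
/*
 * Editor's note on the "pipe": each element cycles through the
 * RCU_TORTURE_PIPE_LEN + 1 counters above, with rtort_pipe_count advancing
 * by one per grace period after the element has been unpublished.  A reader
 * that still observes an element whose pipe count exceeds 1 therefore saw it
 * survive more than one full grace period after removal, which the reader
 * path records via
 *
 *	__this_cpu_inc(rcu_torture_count[pipe_count]);
 *
 * and the stats code reports as a "Reader Pipe" error.
 */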
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}
static unsigned long rcu_no_completed(void)
{
	return 0;
}
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};
/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.name		= "tasks"
};
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};
static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
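
/*
 * Editor's sketch of the ->inflight handshake used below: the boost kthread
 * only posts a new callback once the previous one has run, pairing an
 * acquire load with the release store in rcu_torture_boost_cb():
 *
 *	if (!smp_load_acquire(&rbi.inflight)) {
 *		smp_store_release(&rbi.inflight, 1);
 *		call_rcu(&rbi.rcu, rcu_torture_boost_cb);
 *	}
 *
 * so a callback that stays "in flight" too long is evidence that the
 * grace period, and hence the boosted reader, is being starved.
 */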
static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise, the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		return true; /* failed */
	}

	return false; /* passed */
}
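
/*
 * Worked example of the check above (editor's illustration): with the
 * default test_boost_duration of 4 seconds and HZ = 1000, a callback posted
 * at the start of a boost interval must be invoked within
 * 4 * 1000 - 1000 / 2 = 3500 jiffies (3.5 s); anything slower is counted in
 * n_rcu_torture_boost_failure as a likely priority-inversion failure.
 */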
769 static int rcu_torture_boost(void *arg
)
771 unsigned long call_rcu_time
;
772 unsigned long endtime
;
773 unsigned long oldstarttime
;
774 struct rcu_boost_inflight rbi
= { .inflight
= 0 };
775 struct sched_param sp
;
777 VERBOSE_TOROUT_STRING("rcu_torture_boost started");
779 /* Set real-time priority. */
780 sp
.sched_priority
= 1;
781 if (sched_setscheduler(current
, SCHED_FIFO
, &sp
) < 0) {
782 VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
783 n_rcu_torture_boost_rterror
++;
786 init_rcu_head_on_stack(&rbi
.rcu
);
787 /* Each pass through the following loop does one boost-test cycle. */
789 /* Track if the test failed already in this test interval? */
792 /* Increment n_rcu_torture_boosts once per boost-test */
793 while (!kthread_should_stop()) {
794 if (mutex_trylock(&boost_mutex
)) {
795 n_rcu_torture_boosts
++;
796 mutex_unlock(&boost_mutex
);
799 schedule_timeout_uninterruptible(1);
801 if (kthread_should_stop())
804 /* Wait for the next test interval. */
805 oldstarttime
= boost_starttime
;
806 while (ULONG_CMP_LT(jiffies
, oldstarttime
)) {
807 schedule_timeout_interruptible(oldstarttime
- jiffies
);
808 stutter_wait("rcu_torture_boost");
809 if (torture_must_stop())
813 /* Do one boost-test interval. */
814 endtime
= oldstarttime
+ test_boost_duration
* HZ
;
815 call_rcu_time
= jiffies
;
816 while (ULONG_CMP_LT(jiffies
, endtime
)) {
817 /* If we don't have a callback in flight, post one. */
818 if (!smp_load_acquire(&rbi
.inflight
)) {
819 /* RCU core before ->inflight = 1. */
820 smp_store_release(&rbi
.inflight
, 1);
821 call_rcu(&rbi
.rcu
, rcu_torture_boost_cb
);
822 /* Check if the boost test failed */
824 rcu_torture_boost_failed(call_rcu_time
,
826 call_rcu_time
= jiffies
;
828 stutter_wait("rcu_torture_boost");
829 if (torture_must_stop())
834 * If boost never happened, then inflight will always be 1, in
835 * this case the boost check would never happen in the above
836 * loop so do another one here.
838 if (!failed
&& smp_load_acquire(&rbi
.inflight
))
839 rcu_torture_boost_failed(call_rcu_time
, jiffies
);
842 * Set the start time of the next test interval.
843 * Yes, this is vulnerable to long delays, but such
844 * delays simply cause a false negative for the next
845 * interval. Besides, we are running at RT priority,
846 * so delays should be relatively rare.
848 while (oldstarttime
== boost_starttime
&&
849 !kthread_should_stop()) {
850 if (mutex_trylock(&boost_mutex
)) {
851 boost_starttime
= jiffies
+
852 test_boost_interval
* HZ
;
853 mutex_unlock(&boost_mutex
);
856 schedule_timeout_uninterruptible(1);
859 /* Go do the stutter. */
860 checkwait
: stutter_wait("rcu_torture_boost");
861 } while (!torture_must_stop());
863 /* Clean up and exit. */
864 while (!kthread_should_stop() || smp_load_acquire(&rbi
.inflight
)) {
865 torture_shutdown_absorb("rcu_torture_boost");
866 schedule_timeout_uninterruptible(1);
868 destroy_rcu_head_on_stack(&rbi
.rcu
);
869 torture_kthread_stopping("rcu_torture_boost");
static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu() or analogous, increasing the probability of occurrence
 * of callback-overflow corner cases.
 */
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(array3_size(cbflood_n_burst,
					  cbflood_n_per_burst,
					  sizeof(*rhp)));
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}
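
/*
 * Sizing note (editor's illustration): with the default cbflood_n_burst = 3
 * and cbflood_n_per_burst = 20000, the vmalloc() above reserves
 * 3 * 20000 = 60000 rcu_head structures, and each pass through the outer
 * loop queues all of them before cur_ops->cb_barrier() drains the flood.
 */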
927 * RCU torture force-quiescent-state kthread. Repeatedly induces
928 * bursts of calls to force_quiescent_state(), increasing the probability
929 * of occurrence of some important types of race conditions.
932 rcu_torture_fqs(void *arg
)
934 unsigned long fqs_resume_time
;
935 int fqs_burst_remaining
;
937 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
939 fqs_resume_time
= jiffies
+ fqs_stutter
* HZ
;
940 while (ULONG_CMP_LT(jiffies
, fqs_resume_time
) &&
941 !kthread_should_stop()) {
942 schedule_timeout_interruptible(1);
944 fqs_burst_remaining
= fqs_duration
;
945 while (fqs_burst_remaining
> 0 &&
946 !kthread_should_stop()) {
949 fqs_burst_remaining
-= fqs_holdoff
;
951 stutter_wait("rcu_torture_fqs");
952 } while (!torture_must_stop());
953 torture_kthread_stopping("rcu_torture_fqs");
958 * RCU torture writer kthread. Repeatedly substitutes a new structure
959 * for that pointed to by rcu_torture_current, freeing the old structure
960 * after a series of grace periods (the "pipeline").
963 rcu_torture_writer(void *arg
)
965 bool can_expedite
= !rcu_gp_is_expedited() && !rcu_gp_is_normal();
967 unsigned long gp_snap
;
968 bool gp_cond1
= gp_cond
, gp_exp1
= gp_exp
, gp_normal1
= gp_normal
;
969 bool gp_sync1
= gp_sync
;
971 struct rcu_torture
*rp
;
972 struct rcu_torture
*old_rp
;
973 static DEFINE_TORTURE_RANDOM(rand
);
974 int synctype
[] = { RTWS_DEF_FREE
, RTWS_EXP_SYNC
,
975 RTWS_COND_GET
, RTWS_SYNC
};
978 VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
980 pr_alert("%s" TORTURE_FLAG
981 " GP expediting controlled from boot/sysfs for %s.\n",
982 torture_type
, cur_ops
->name
);
984 /* Initialize synctype[] array. If none set, take default. */
985 if (!gp_cond1
&& !gp_exp1
&& !gp_normal1
&& !gp_sync1
)
986 gp_cond1
= gp_exp1
= gp_normal1
= gp_sync1
= true;
987 if (gp_cond1
&& cur_ops
->get_state
&& cur_ops
->cond_sync
) {
988 synctype
[nsynctypes
++] = RTWS_COND_GET
;
989 pr_info("%s: Testing conditional GPs.\n", __func__
);
990 } else if (gp_cond
&& (!cur_ops
->get_state
|| !cur_ops
->cond_sync
)) {
991 pr_alert("%s: gp_cond without primitives.\n", __func__
);
993 if (gp_exp1
&& cur_ops
->exp_sync
) {
994 synctype
[nsynctypes
++] = RTWS_EXP_SYNC
;
995 pr_info("%s: Testing expedited GPs.\n", __func__
);
996 } else if (gp_exp
&& !cur_ops
->exp_sync
) {
997 pr_alert("%s: gp_exp without primitives.\n", __func__
);
999 if (gp_normal1
&& cur_ops
->deferred_free
) {
1000 synctype
[nsynctypes
++] = RTWS_DEF_FREE
;
1001 pr_info("%s: Testing asynchronous GPs.\n", __func__
);
1002 } else if (gp_normal
&& !cur_ops
->deferred_free
) {
1003 pr_alert("%s: gp_normal without primitives.\n", __func__
);
1005 if (gp_sync1
&& cur_ops
->sync
) {
1006 synctype
[nsynctypes
++] = RTWS_SYNC
;
1007 pr_info("%s: Testing normal GPs.\n", __func__
);
1008 } else if (gp_sync
&& !cur_ops
->sync
) {
1009 pr_alert("%s: gp_sync without primitives.\n", __func__
);
1011 if (WARN_ONCE(nsynctypes
== 0,
1012 "rcu_torture_writer: No update-side primitives.\n")) {
1014 * No updates primitives, so don't try updating.
1015 * The resulting test won't be testing much, hence the
1016 * above WARN_ONCE().
1018 rcu_torture_writer_state
= RTWS_STOPPING
;
1019 torture_kthread_stopping("rcu_torture_writer");
1023 rcu_torture_writer_state
= RTWS_FIXED_DELAY
;
1024 schedule_timeout_uninterruptible(1);
1025 rp
= rcu_torture_alloc();
1028 rp
->rtort_pipe_count
= 0;
1029 rcu_torture_writer_state
= RTWS_DELAY
;
1030 udelay(torture_random(&rand
) & 0x3ff);
1031 rcu_torture_writer_state
= RTWS_REPLACE
;
1032 old_rp
= rcu_dereference_check(rcu_torture_current
,
1033 current
== writer_task
);
1034 rp
->rtort_mbtest
= 1;
1035 rcu_assign_pointer(rcu_torture_current
, rp
);
1036 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1038 i
= old_rp
->rtort_pipe_count
;
1039 if (i
> RCU_TORTURE_PIPE_LEN
)
1040 i
= RCU_TORTURE_PIPE_LEN
;
1041 atomic_inc(&rcu_torture_wcount
[i
]);
1042 old_rp
->rtort_pipe_count
++;
1043 switch (synctype
[torture_random(&rand
) % nsynctypes
]) {
1045 rcu_torture_writer_state
= RTWS_DEF_FREE
;
1046 cur_ops
->deferred_free(old_rp
);
1049 rcu_torture_writer_state
= RTWS_EXP_SYNC
;
1050 cur_ops
->exp_sync();
1051 rcu_torture_pipe_update(old_rp
);
1054 rcu_torture_writer_state
= RTWS_COND_GET
;
1055 gp_snap
= cur_ops
->get_state();
1056 i
= torture_random(&rand
) % 16;
1058 schedule_timeout_interruptible(i
);
1059 udelay(torture_random(&rand
) % 1000);
1060 rcu_torture_writer_state
= RTWS_COND_SYNC
;
1061 cur_ops
->cond_sync(gp_snap
);
1062 rcu_torture_pipe_update(old_rp
);
1065 rcu_torture_writer_state
= RTWS_SYNC
;
1067 rcu_torture_pipe_update(old_rp
);
1074 WRITE_ONCE(rcu_torture_current_version
,
1075 rcu_torture_current_version
+ 1);
1076 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1078 !(torture_random(&rand
) & 0xff & (!!expediting
- 1))) {
1079 WARN_ON_ONCE(expediting
== 0 && rcu_gp_is_expedited());
1080 if (expediting
>= 0)
1083 rcu_unexpedite_gp();
1084 if (++expediting
> 3)
1085 expediting
= -expediting
;
1086 } else if (!can_expedite
) { /* Disabled during boot, recheck. */
1087 can_expedite
= !rcu_gp_is_expedited() &&
1088 !rcu_gp_is_normal();
1090 rcu_torture_writer_state
= RTWS_STUTTER
;
1091 if (stutter_wait("rcu_torture_writer"))
1092 for (i
= 0; i
< ARRAY_SIZE(rcu_tortures
); i
++)
1093 if (list_empty(&rcu_tortures
[i
].rtort_free
))
1095 } while (!torture_must_stop());
1096 /* Reset expediting back to unexpedited. */
1098 expediting
= -expediting
;
1099 while (can_expedite
&& expediting
++ < 0)
1100 rcu_unexpedite_gp();
1101 WARN_ON_ONCE(can_expedite
&& rcu_gp_is_expedited());
1103 pr_alert("%s" TORTURE_FLAG
1104 " Dynamic grace-period expediting was disabled.\n",
1106 rcu_torture_writer_state
= RTWS_STOPPING
;
1107 torture_kthread_stopping("rcu_torture_writer");
1112 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1113 * delay between calls.
1116 rcu_torture_fakewriter(void *arg
)
1118 DEFINE_TORTURE_RANDOM(rand
);
1120 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1121 set_user_nice(current
, MAX_NICE
);
1124 schedule_timeout_uninterruptible(1 + torture_random(&rand
)%10);
1125 udelay(torture_random(&rand
) & 0x3ff);
1126 if (cur_ops
->cb_barrier
!= NULL
&&
1127 torture_random(&rand
) % (nfakewriters
* 8) == 0) {
1128 cur_ops
->cb_barrier();
1129 } else if (gp_normal
== gp_exp
) {
1130 if (cur_ops
->sync
&& torture_random(&rand
) & 0x80)
1132 else if (cur_ops
->exp_sync
)
1133 cur_ops
->exp_sync();
1134 } else if (gp_normal
&& cur_ops
->sync
) {
1136 } else if (cur_ops
->exp_sync
) {
1137 cur_ops
->exp_sync();
1139 stutter_wait("rcu_torture_fakewriter");
1140 } while (!torture_must_stop());
1142 torture_kthread_stopping("rcu_torture_fakewriter");
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
1215 /* Return the biggest extendables mask given current RCU and boot parameters. */
1216 static int rcutorture_extend_mask_max(void)
1220 WARN_ON_ONCE(extendables
& ~RCUTORTURE_MAX_EXTEND
);
1221 mask
= extendables
& RCUTORTURE_MAX_EXTEND
& cur_ops
->extendables
;
1222 mask
= mask
| RCUTORTURE_RDR_RCU
;
1226 /* Return a random protection state mask, but with at least one bit set. */
1228 rcutorture_extend_mask(int oldmask
, struct torture_random_state
*trsp
)
1230 int mask
= rcutorture_extend_mask_max();
1231 unsigned long randmask1
= torture_random(trsp
) >> 8;
1232 unsigned long randmask2
= randmask1
>> 3;
1234 WARN_ON_ONCE(mask
>> RCUTORTURE_RDR_SHIFT
);
1235 /* Most of the time lots of bits, half the time only one bit. */
1236 if (!(randmask1
& 0x7))
1237 mask
= mask
& randmask2
;
1239 mask
= mask
& (1 << (randmask2
% RCUTORTURE_RDR_NBITS
));
1240 /* Can't enable bh w/irq disabled. */
1241 if ((mask
& RCUTORTURE_RDR_IRQ
) &&
1242 ((!(mask
& RCUTORTURE_RDR_BH
) && (oldmask
& RCUTORTURE_RDR_BH
)) ||
1243 (!(mask
& RCUTORTURE_RDR_RBH
) && (oldmask
& RCUTORTURE_RDR_RBH
))))
1244 mask
|= RCUTORTURE_RDR_BH
| RCUTORTURE_RDR_RBH
;
1245 if ((mask
& RCUTORTURE_RDR_IRQ
) &&
1246 !(mask
& cur_ops
->ext_irq_conflict
) &&
1247 (oldmask
& cur_ops
->ext_irq_conflict
))
1248 mask
|= cur_ops
->ext_irq_conflict
; /* Or if readers object. */
1249 return mask
?: RCUTORTURE_RDR_RCU
;
1253 * Do a randomly selected number of extensions of an existing RCU read-side
1256 static struct rt_read_seg
*
1257 rcutorture_loop_extend(int *readstate
, struct torture_random_state
*trsp
,
1258 struct rt_read_seg
*rtrsp
)
1262 int mask
= rcutorture_extend_mask_max();
1264 WARN_ON_ONCE(!*readstate
); /* -Existing- RCU read-side critsect! */
1265 if (!((mask
- 1) & mask
))
1266 return rtrsp
; /* Current RCU reader not extendable. */
1267 /* Bias towards larger numbers of loops. */
1268 i
= (torture_random(trsp
) >> 3);
1269 i
= ((i
| (i
>> 3)) & RCUTORTURE_RDR_MAX_LOOPS
) + 1;
1270 for (j
= 0; j
< i
; j
++) {
1271 mask
= rcutorture_extend_mask(*readstate
, trsp
);
1272 rcutorture_one_extend(readstate
, mask
, trsp
, &rtrsp
[j
]);
1278 * Do one read-side critical section, returning false if there was
1279 * no data to read. Can be invoked both from process context and
1280 * from a timer handler.
1282 static bool rcu_torture_one_read(struct torture_random_state
*trsp
)
1285 unsigned long started
;
1286 unsigned long completed
;
1288 struct rcu_torture
*p
;
1291 struct rt_read_seg rtseg
[RCUTORTURE_RDR_MAX_SEGS
] = { { 0 } };
1292 struct rt_read_seg
*rtrsp
= &rtseg
[0];
1293 struct rt_read_seg
*rtrsp1
;
1294 unsigned long long ts
;
1296 newstate
= rcutorture_extend_mask(readstate
, trsp
);
1297 rcutorture_one_extend(&readstate
, newstate
, trsp
, rtrsp
++);
1298 started
= cur_ops
->get_gp_seq();
1299 ts
= rcu_trace_clock_local();
1300 p
= rcu_dereference_check(rcu_torture_current
,
1301 rcu_read_lock_bh_held() ||
1302 rcu_read_lock_sched_held() ||
1303 srcu_read_lock_held(srcu_ctlp
) ||
1306 /* Wait for rcu_torture_writer to get underway */
1307 rcutorture_one_extend(&readstate
, 0, trsp
, rtrsp
);
1310 if (p
->rtort_mbtest
== 0)
1311 atomic_inc(&n_rcu_torture_mberror
);
1312 rtrsp
= rcutorture_loop_extend(&readstate
, trsp
, rtrsp
);
1314 pipe_count
= p
->rtort_pipe_count
;
1315 if (pipe_count
> RCU_TORTURE_PIPE_LEN
) {
1316 /* Should not happen, but... */
1317 pipe_count
= RCU_TORTURE_PIPE_LEN
;
1319 completed
= cur_ops
->get_gp_seq();
1320 if (pipe_count
> 1) {
1321 do_trace_rcu_torture_read(cur_ops
->name
, &p
->rtort_rcu
,
1322 ts
, started
, completed
);
1323 rcu_ftrace_dump(DUMP_ALL
);
1325 __this_cpu_inc(rcu_torture_count
[pipe_count
]);
1326 completed
= rcutorture_seq_diff(completed
, started
);
1327 if (completed
> RCU_TORTURE_PIPE_LEN
) {
1328 /* Should not happen, but... */
1329 completed
= RCU_TORTURE_PIPE_LEN
;
1331 __this_cpu_inc(rcu_torture_batch
[completed
]);
1333 rcutorture_one_extend(&readstate
, 0, trsp
, rtrsp
);
1334 WARN_ON_ONCE(readstate
& RCUTORTURE_RDR_MASK
);
1336 /* If error or close call, record the sequence of reader protections. */
1337 if ((pipe_count
> 1 || completed
> 1) && !xchg(&err_segs_recorded
, 1)) {
1339 for (rtrsp1
= &rtseg
[0]; rtrsp1
< rtrsp
; rtrsp1
++)
1340 err_segs
[i
++] = *rtrsp1
;
1347 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand
);
1350 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1351 * incrementing the corresponding element of the pipeline array. The
1352 * counter in the element should never be greater than 1, otherwise, the
1353 * RCU implementation is broken.
1355 static void rcu_torture_timer(struct timer_list
*unused
)
1357 atomic_long_inc(&n_rcu_torture_timers
);
1358 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand
));
1360 /* Test call_rcu() invocation from interrupt handler. */
1361 if (cur_ops
->call
) {
1362 struct rcu_head
*rhp
= kmalloc(sizeof(*rhp
), GFP_NOWAIT
);
1365 cur_ops
->call(rhp
, rcu_torture_timer_cb
);
1370 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
1371 * incrementing the corresponding element of the pipeline array. The
1372 * counter in the element should never be greater than 1, otherwise, the
1373 * RCU implementation is broken.
1376 rcu_torture_reader(void *arg
)
1378 unsigned long lastsleep
= jiffies
;
1379 long myid
= (long)arg
;
1380 int mynumonline
= myid
;
1381 DEFINE_TORTURE_RANDOM(rand
);
1382 struct timer_list t
;
1384 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1385 set_user_nice(current
, MAX_NICE
);
1386 if (irqreader
&& cur_ops
->irq_capable
)
1387 timer_setup_on_stack(&t
, rcu_torture_timer
, 0);
1390 if (irqreader
&& cur_ops
->irq_capable
) {
1391 if (!timer_pending(&t
))
1392 mod_timer(&t
, jiffies
+ 1);
1394 if (!rcu_torture_one_read(&rand
))
1395 schedule_timeout_interruptible(HZ
);
1396 if (time_after(jiffies
, lastsleep
)) {
1397 schedule_timeout_interruptible(1);
1398 lastsleep
= jiffies
+ 10;
1400 while (num_online_cpus() < mynumonline
&& !torture_must_stop())
1401 schedule_timeout_interruptible(HZ
/ 5);
1402 stutter_wait("rcu_torture_reader");
1403 } while (!torture_must_stop());
1404 if (irqreader
&& cur_ops
->irq_capable
) {
1406 destroy_timer_on_stack(&t
);
1408 torture_kthread_stopping("rcu_torture_reader");
1413 * Print torture statistics. Caller must ensure that there is only
1414 * one call to this function at a given time!!! This is normally
1415 * accomplished by relying on the module system to only have one copy
1416 * of the module loaded, and then by giving the rcu_torture_stats
1417 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1418 * thread is not running).
1421 rcu_torture_stats_print(void)
1425 long pipesummary
[RCU_TORTURE_PIPE_LEN
+ 1] = { 0 };
1426 long batchsummary
[RCU_TORTURE_PIPE_LEN
+ 1] = { 0 };
1427 static unsigned long rtcv_snap
= ULONG_MAX
;
1428 static bool splatted
;
1429 struct task_struct
*wtp
;
1431 for_each_possible_cpu(cpu
) {
1432 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++) {
1433 pipesummary
[i
] += per_cpu(rcu_torture_count
, cpu
)[i
];
1434 batchsummary
[i
] += per_cpu(rcu_torture_batch
, cpu
)[i
];
1437 for (i
= RCU_TORTURE_PIPE_LEN
- 1; i
>= 0; i
--) {
1438 if (pipesummary
[i
] != 0)
1442 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1443 pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1444 rcu_torture_current
,
1445 rcu_torture_current_version
,
1446 list_empty(&rcu_torture_freelist
),
1447 atomic_read(&n_rcu_torture_alloc
),
1448 atomic_read(&n_rcu_torture_alloc_fail
),
1449 atomic_read(&n_rcu_torture_free
));
1450 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
1451 atomic_read(&n_rcu_torture_mberror
),
1452 n_rcu_torture_barrier_error
,
1453 n_rcu_torture_boost_ktrerror
,
1454 n_rcu_torture_boost_rterror
);
1455 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1456 n_rcu_torture_boost_failure
,
1457 n_rcu_torture_boosts
,
1458 atomic_long_read(&n_rcu_torture_timers
));
1459 torture_onoff_stats();
1460 pr_cont("barrier: %ld/%ld:%ld ",
1461 n_barrier_successes
,
1463 n_rcu_torture_barrier_error
);
1464 pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods
));
1466 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1467 if (atomic_read(&n_rcu_torture_mberror
) != 0 ||
1468 n_rcu_torture_barrier_error
!= 0 ||
1469 n_rcu_torture_boost_ktrerror
!= 0 ||
1470 n_rcu_torture_boost_rterror
!= 0 ||
1471 n_rcu_torture_boost_failure
!= 0 ||
1473 pr_cont("%s", "!!! ");
1474 atomic_inc(&n_rcu_torture_error
);
1477 pr_cont("Reader Pipe: ");
1478 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++)
1479 pr_cont(" %ld", pipesummary
[i
]);
1482 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1483 pr_cont("Reader Batch: ");
1484 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++)
1485 pr_cont(" %ld", batchsummary
[i
]);
1488 pr_alert("%s%s ", torture_type
, TORTURE_FLAG
);
1489 pr_cont("Free-Block Circulation: ");
1490 for (i
= 0; i
< RCU_TORTURE_PIPE_LEN
+ 1; i
++) {
1491 pr_cont(" %d", atomic_read(&rcu_torture_wcount
[i
]));
1497 if (rtcv_snap
== rcu_torture_current_version
&&
1498 rcu_torture_current
!= NULL
) {
1499 int __maybe_unused flags
= 0;
1500 unsigned long __maybe_unused gp_seq
= 0;
1502 rcutorture_get_gp_data(cur_ops
->ttype
,
1504 srcutorture_get_gp_data(cur_ops
->ttype
, srcu_ctlp
,
1506 wtp
= READ_ONCE(writer_task
);
1507 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
1508 rcu_torture_writer_state_getname(),
1509 rcu_torture_writer_state
, gp_seq
, flags
,
1510 wtp
== NULL
? ~0UL : wtp
->state
,
1511 wtp
== NULL
? -1 : (int)task_cpu(wtp
));
1512 if (!splatted
&& wtp
) {
1513 sched_show_task(wtp
);
1516 show_rcu_gp_kthreads();
1517 rcu_ftrace_dump(DUMP_ALL
);
1519 rtcv_snap
= rcu_torture_current_version
;
1523 * Periodically prints torture statistics, if periodic statistics printing
1524 * was specified via the stat_interval module parameter.
1527 rcu_torture_stats(void *arg
)
1529 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1531 schedule_timeout_interruptible(stat_interval
* HZ
);
1532 rcu_torture_stats_print();
1533 torture_shutdown_absorb("rcu_torture_stats");
1534 } while (!torture_must_stop());
1535 torture_kthread_stopping("rcu_torture_stats");
1540 rcu_torture_print_module_parms(struct rcu_torture_ops
*cur_ops
, const char *tag
)
1542 pr_alert("%s" TORTURE_FLAG
1543 "--- %s: nreaders=%d nfakewriters=%d "
1544 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1545 "shuffle_interval=%d stutter=%d irqreader=%d "
1546 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1547 "test_boost=%d/%d test_boost_interval=%d "
1548 "test_boost_duration=%d shutdown_secs=%d "
1549 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1551 "onoff_interval=%d onoff_holdoff=%d\n",
1552 torture_type
, tag
, nrealreaders
, nfakewriters
,
1553 stat_interval
, verbose
, test_no_idle_hz
, shuffle_interval
,
1554 stutter
, irqreader
, fqs_duration
, fqs_holdoff
, fqs_stutter
,
1555 test_boost
, cur_ops
->can_boost
,
1556 test_boost_interval
, test_boost_duration
, shutdown_secs
,
1557 stall_cpu
, stall_cpu_holdoff
, stall_cpu_irqsoff
,
1559 onoff_interval
, onoff_holdoff
);
1562 static int rcutorture_booster_cleanup(unsigned int cpu
)
1564 struct task_struct
*t
;
1566 if (boost_tasks
[cpu
] == NULL
)
1568 mutex_lock(&boost_mutex
);
1569 t
= boost_tasks
[cpu
];
1570 boost_tasks
[cpu
] = NULL
;
1571 rcu_torture_enable_rt_throttle();
1572 mutex_unlock(&boost_mutex
);
1574 /* This must be outside of the mutex, otherwise deadlock! */
1575 torture_stop_kthread(rcu_torture_boost
, t
);
1579 static int rcutorture_booster_init(unsigned int cpu
)
1583 if (boost_tasks
[cpu
] != NULL
)
1584 return 0; /* Already created, nothing more to do. */
1586 /* Don't allow time recalculation while creating a new task. */
1587 mutex_lock(&boost_mutex
);
1588 rcu_torture_disable_rt_throttle();
1589 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1590 boost_tasks
[cpu
] = kthread_create_on_node(rcu_torture_boost
, NULL
,
1592 "rcu_torture_boost");
1593 if (IS_ERR(boost_tasks
[cpu
])) {
1594 retval
= PTR_ERR(boost_tasks
[cpu
]);
1595 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1596 n_rcu_torture_boost_ktrerror
++;
1597 boost_tasks
[cpu
] = NULL
;
1598 mutex_unlock(&boost_mutex
);
1601 kthread_bind(boost_tasks
[cpu
], cpu
);
1602 wake_up_process(boost_tasks
[cpu
]);
1603 mutex_unlock(&boost_mutex
);
1608 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
1609 * induces a CPU stall for the time specified by stall_cpu.
1611 static int rcu_torture_stall(void *args
)
1613 unsigned long stop_at
;
1615 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1616 if (stall_cpu_holdoff
> 0) {
1617 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
1618 schedule_timeout_interruptible(stall_cpu_holdoff
* HZ
);
1619 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1621 if (!kthread_should_stop()) {
1622 stop_at
= ktime_get_seconds() + stall_cpu
;
1623 /* RCU CPU stall is expected behavior in following code. */
1625 if (stall_cpu_irqsoff
)
1626 local_irq_disable();
1629 pr_alert("rcu_torture_stall start on CPU %d.\n",
1630 smp_processor_id());
1631 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1633 continue; /* Induce RCU CPU stall warning. */
1634 if (stall_cpu_irqsoff
)
1639 pr_alert("rcu_torture_stall end.\n");
1641 torture_shutdown_absorb("rcu_torture_stall");
1642 while (!kthread_should_stop())
1643 schedule_timeout_interruptible(10 * HZ
);
1647 /* Spawn CPU-stall kthread, if stall_cpu specified. */
1648 static int __init
rcu_torture_stall_init(void)
1652 return torture_create_kthread(rcu_torture_stall
, NULL
, stall_task
);
1655 /* State structure for forward-progress self-propagating RCU callback. */
1656 struct fwd_cb_state
{
1662 * Forward-progress self-propagating RCU callback function. Because
1663 * callbacks run from softirq, this function is an implicit RCU read-side
1666 static void rcu_torture_fwd_prog_cb(struct rcu_head
*rhp
)
1668 struct fwd_cb_state
*fcsp
= container_of(rhp
, struct fwd_cb_state
, rh
);
1670 if (READ_ONCE(fcsp
->stop
)) {
1671 WRITE_ONCE(fcsp
->stop
, 2);
1674 cur_ops
->call(&fcsp
->rh
, rcu_torture_fwd_prog_cb
);
1677 /* Carry out grace-period forward-progress testing. */
1678 static int rcu_torture_fwd_prog(void *args
)
1682 struct fwd_cb_state fcs
;
1687 bool selfpropcb
= false;
1688 unsigned long stopat
;
1690 int tested_tries
= 0;
1691 static DEFINE_TORTURE_RANDOM(trs
);
1693 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
1694 if (!IS_ENABLED(CONFIG_SMP
) || !IS_ENABLED(CONFIG_RCU_BOOST
))
1695 set_user_nice(current
, MAX_NICE
);
1696 if (cur_ops
->call
&& cur_ops
->sync
&& cur_ops
->cb_barrier
) {
1697 init_rcu_head_on_stack(&fcs
.rh
);
1701 schedule_timeout_interruptible(fwd_progress_holdoff
* HZ
);
1703 WRITE_ONCE(fcs
.stop
, 0);
1704 cur_ops
->call(&fcs
.rh
, rcu_torture_fwd_prog_cb
);
1706 cver
= READ_ONCE(rcu_torture_current_version
);
1707 gps
= cur_ops
->get_gp_seq();
1708 sd
= cur_ops
->stall_dur() + 1;
1709 sd4
= (sd
+ fwd_progress_div
- 1) / fwd_progress_div
;
1710 dur
= sd4
+ torture_random(&trs
) % (sd
- sd4
);
1711 stopat
= jiffies
+ dur
;
1712 while (time_before(jiffies
, stopat
) && !torture_must_stop()) {
1713 idx
= cur_ops
->readlock();
1715 cur_ops
->readunlock(idx
);
1716 if (!fwd_progress_need_resched
|| need_resched())
1720 if (!time_before(jiffies
, stopat
) && !torture_must_stop()) {
1722 cver
= READ_ONCE(rcu_torture_current_version
) - cver
;
1723 gps
= rcutorture_seq_diff(cur_ops
->get_gp_seq(), gps
);
1724 WARN_ON(!cver
&& gps
< 2);
1725 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__
, dur
, cver
, gps
);
1728 WRITE_ONCE(fcs
.stop
, 1);
1729 cur_ops
->sync(); /* Wait for running CB to complete. */
1730 cur_ops
->cb_barrier(); /* Wait for queued callbacks. */
1732 /* Avoid slow periods, better to test when busy. */
1733 stutter_wait("rcu_torture_fwd_prog");
1734 } while (!torture_must_stop());
1736 WARN_ON(READ_ONCE(fcs
.stop
) != 2);
1737 destroy_rcu_head_on_stack(&fcs
.rh
);
1739 /* Short runs might not contain a valid forward-progress attempt. */
1740 WARN_ON(!tested
&& tested_tries
>= 5);
1741 pr_alert("%s: tested %d tested_tries %d\n", __func__
, tested
, tested_tries
);
1742 torture_kthread_stopping("rcu_torture_fwd_prog");
1746 /* If forward-progress checking is requested and feasible, spawn the thread. */
1747 static int __init
rcu_torture_fwd_prog_init(void)
1750 return 0; /* Not requested, so don't do it. */
1751 if (!cur_ops
->stall_dur
|| cur_ops
->stall_dur() <= 0) {
1752 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
1755 if (stall_cpu
> 0) {
1756 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
1757 if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS
))
1758 return -EINVAL
; /* In module, can fail back to user. */
1759 WARN_ON(1); /* Make sure rcutorture notices conflict. */
1762 if (fwd_progress_holdoff
<= 0)
1763 fwd_progress_holdoff
= 1;
1764 if (fwd_progress_div
<= 0)
1765 fwd_progress_div
= 4;
1766 return torture_create_kthread(rcu_torture_fwd_prog
,
1767 NULL
, fwd_prog_task
);
1770 /* Callback function for RCU barrier testing. */
1771 static void rcu_torture_barrier_cbf(struct rcu_head
*rcu
)
1773 atomic_inc(&barrier_cbs_invoked
);
1776 /* kthread function to register callbacks used to test RCU barriers. */
1777 static int rcu_torture_barrier_cbs(void *arg
)
1779 long myid
= (long)arg
;
1782 struct rcu_head rcu
;
1784 init_rcu_head_on_stack(&rcu
);
1785 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
1786 set_user_nice(current
, MAX_NICE
);
1788 wait_event(barrier_cbs_wq
[myid
],
1790 smp_load_acquire(&barrier_phase
)) != lastphase
||
1791 torture_must_stop());
1792 lastphase
= newphase
;
1793 if (torture_must_stop())
1796 * The above smp_load_acquire() ensures barrier_phase load
1797 * is ordered before the following ->call().
1799 local_irq_disable(); /* Just to test no-irq call_rcu(). */
1800 cur_ops
->call(&rcu
, rcu_torture_barrier_cbf
);
1802 if (atomic_dec_and_test(&barrier_cbs_count
))
1803 wake_up(&barrier_wq
);
1804 } while (!torture_must_stop());
1805 if (cur_ops
->cb_barrier
!= NULL
)
1806 cur_ops
->cb_barrier();
1807 destroy_rcu_head_on_stack(&rcu
);
1808 torture_kthread_stopping("rcu_torture_barrier_cbs");
1812 /* kthread function to drive and coordinate RCU barrier testing. */
1813 static int rcu_torture_barrier(void *arg
)
1817 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
1819 atomic_set(&barrier_cbs_invoked
, 0);
1820 atomic_set(&barrier_cbs_count
, n_barrier_cbs
);
1821 /* Ensure barrier_phase ordered after prior assignments. */
1822 smp_store_release(&barrier_phase
, !barrier_phase
);
1823 for (i
= 0; i
< n_barrier_cbs
; i
++)
1824 wake_up(&barrier_cbs_wq
[i
]);
1825 wait_event(barrier_wq
,
1826 atomic_read(&barrier_cbs_count
) == 0 ||
1827 torture_must_stop());
1828 if (torture_must_stop())
1830 n_barrier_attempts
++;
1831 cur_ops
->cb_barrier(); /* Implies smp_mb() for wait_event(). */
1832 if (atomic_read(&barrier_cbs_invoked
) != n_barrier_cbs
) {
1833 n_rcu_torture_barrier_error
++;
1834 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
1835 atomic_read(&barrier_cbs_invoked
),
1839 n_barrier_successes
++;
1841 schedule_timeout_interruptible(HZ
/ 10);
1842 } while (!torture_must_stop());
1843 torture_kthread_stopping("rcu_torture_barrier");
1847 /* Initialize RCU barrier testing. */
1848 static int rcu_torture_barrier_init(void)
1853 if (n_barrier_cbs
<= 0)
1855 if (cur_ops
->call
== NULL
|| cur_ops
->cb_barrier
== NULL
) {
1856 pr_alert("%s" TORTURE_FLAG
1857 " Call or barrier ops missing for %s,\n",
1858 torture_type
, cur_ops
->name
);
1859 pr_alert("%s" TORTURE_FLAG
1860 " RCU barrier testing omitted from run.\n",
1864 atomic_set(&barrier_cbs_count
, 0);
1865 atomic_set(&barrier_cbs_invoked
, 0);
1867 kcalloc(n_barrier_cbs
, sizeof(barrier_cbs_tasks
[0]),
1870 kcalloc(n_barrier_cbs
, sizeof(barrier_cbs_wq
[0]), GFP_KERNEL
);
1871 if (barrier_cbs_tasks
== NULL
|| !barrier_cbs_wq
)
1873 for (i
= 0; i
< n_barrier_cbs
; i
++) {
1874 init_waitqueue_head(&barrier_cbs_wq
[i
]);
1875 ret
= torture_create_kthread(rcu_torture_barrier_cbs
,
1877 barrier_cbs_tasks
[i
]);
1881 return torture_create_kthread(rcu_torture_barrier
, NULL
, barrier_task
);
1884 /* Clean up after RCU barrier testing. */
1885 static void rcu_torture_barrier_cleanup(void)
1889 torture_stop_kthread(rcu_torture_barrier
, barrier_task
);
1890 if (barrier_cbs_tasks
!= NULL
) {
1891 for (i
= 0; i
< n_barrier_cbs
; i
++)
1892 torture_stop_kthread(rcu_torture_barrier_cbs
,
1893 barrier_cbs_tasks
[i
]);
1894 kfree(barrier_cbs_tasks
);
1895 barrier_cbs_tasks
= NULL
;
1897 if (barrier_cbs_wq
!= NULL
) {
1898 kfree(barrier_cbs_wq
);
1899 barrier_cbs_wq
= NULL
;
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}
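/*
 * Note for test setup (illustrative, not enforced here): boosting is
 * exercised only when the RCU grace-period kthreads run at elevated
 * priority, so a typical boost-test run might add something like the
 * following to the kernel command line:
 *
 *	rcutree.kthread_prio=2 rcutorture.test_boost=2
 *
 * The values are examples only; rcu_torture_can_boost() merely requires
 * rcu_get_gp_kthreads_prio() to report a priority of at least 2.
 */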
static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	for (i = 0; i < ncbflooders; i++)
		torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}
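/*
 * A note on the cleanup ordering above: the final cur_ops->cb_barrier()
 * invocation must complete before torture-type-specific cleanup and before
 * module unload, because any callback still queued through cur_ops->call()
 * references code and data owned by this module.  The same pattern applies
 * to any module that posts call_rcu() callbacks; a hypothetical sketch:
 *
 *	static void __exit example_exit(void)
 *	{
 *		stop_posting_new_callbacks();
 *		rcu_barrier();		// wait for outstanding callbacks
 *		kfree(remaining_state);
 *	}
 */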
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
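/*
 * To actually observe the debug-objects splat that the above test provokes,
 * the kernel must be built with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, which
 * depends on the debug-objects infrastructure.  An illustrative config
 * fragment (exact dependencies vary by kernel version):
 *
 *	CONFIG_DEBUG_OBJECTS=y
 *	CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 */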
static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;
	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	if (cbflood_n_burst > 0) {
		/* Create the cbflood threads */
		ncbflooders = (num_online_cpus() + 3) / 4;
		cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
				       GFP_KERNEL);
		if (!cbflood_task) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
		for (i = 0; i < ncbflooders; i++) {
			firsterr = torture_create_kthread(rcu_torture_cbflood,
							  NULL,
							  cbflood_task[i]);
			if (firsterr)
				goto unwind;
		}
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}
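/*
 * Error handling above follows the usual torture-test pattern: the first
 * failure from any torture_create_kthread() or *_init() call lands in
 * firsterr, control jumps to the unwind label, and rcu_torture_cleanup()
 * tears down whatever was already started.  torture_create_kthread() is a
 * macro from linux/torture.h that creates the kthread and stores the
 * resulting task_struct pointer into its last argument, which is why the
 * task pointers above are passed by name rather than by address.
 */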
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
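/*
 * Example invocation (illustrative only; parameter values are arbitrary):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=15 \
 *		 test_boost=2 shutdown_secs=180
 *
 * Progress and statistics are reported through printk(), and the run ends
 * with one of the "End of test: SUCCESS", "End of test: FAILURE", or
 * "End of test: RCU_HOTPLUG" lines printed by rcu_torture_cleanup().
 */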