// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_DESCRIPTION("torture test facility for locking");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
torture_param(int, acq_writer_lim, 0, "Write-acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");

/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8
static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter.  If there are more users later on,
// this might need to go to a more central location.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;
	int ret;
	char *s;

	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
		s = "Out of memory";
		ret = -ENOMEM;
		goto out_err;
	}
	ret = cpulist_parse(val, *cm_bind);
	if (!ret)
		return ret;
	s = "Cannot parse cpulist";

out_err:
	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
	cpumask_setall(*cm_bind);
	return ret;
}
// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;

	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

static bool cpumask_nonempty(cpumask_var_t mask)
{
	return cpumask_available(mask) && !cpumask_empty(mask);
}

static const struct kernel_param_ops lt_bind_ops = {
	.set = param_set_cpumask,
	.get = param_get_cpumask,
};
module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
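// For example, bind_readers=0-3,8 (cpulist format, as accepted by
// cpulist_parse() above) restricts the reader kthreads to CPUs 0-3 and 8;
// an unparseable value falls back to all CPUs.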
long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

struct call_rcu_chain {
	struct rcu_head crc_rh;
	bool crc_stop;
};
static struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);
/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
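/*
 * All per-run state (thread counts, error counts, the selected ops vector,
 * and the per-thread statistics arrays) lives in the single cxt instance
 * above, which is shared by the writer, reader, and statistics kthreads.
 */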
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}
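/*
 * The "lock_busted" flavor above deliberately provides no mutual exclusion
 * at all, so the duplicate-acquisition WARN_ON_ONCE() checks in the writer
 * and reader kthreads should fire.  It exists to validate the test harness
 * itself rather than any real lock.
 */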
static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another 10 * rt_boost_factor
		 * operations, then restored back to its original prio, and so
		 * forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}
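/*
 * Rough cadence, per the checks above: with the default rt_boost_factor of
 * 50 and N write-side kthreads, a non-RT writer is boosted to SCHED_FIFO
 * about once per 50 * N of its lock operations, and a boosted writer is
 * dropped back to SCHED_NORMAL about once per 100 * N operations.
 */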
static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
		j = jiffies;
		mdelay(long_hold);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}
static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};
static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock	= torture_raw_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock	= torture_raw_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};
static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}
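/*
 * The lockset argument is a random bitmask: bit i selects whether nested
 * lock i is taken on this pass.  For example, with nested_locks=4 a value
 * of 0b1010 acquires (and later releases) only nested locks 1 and 3, so
 * successive iterations exercise different lock-ordering chains.
 */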
static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init		= torture_mutex_init,
	.nested_lock	= torture_mutex_nested_lock,
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.nested_unlock	= torture_mutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};
#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}
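/*
 * The loop above follows the usual wound/wait backoff pattern: when
 * ww_mutex_lock() returns -EDEADLK, every mutex acquired so far is
 * released, the contended mutex is re-acquired with the blocking
 * ww_mutex_lock_slow(), and it is moved to the head of the local list so
 * the remaining mutexes are retried in the new order.
 */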
static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}
static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init		= torture_rtmutex_init,
	.nested_lock	= torture_rtmutex_nested_lock,
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.nested_unlock	= torture_rtmutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif
static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention. */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold * 2);
	else
		mdelay(long_hold / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	unsigned long j;
	unsigned long j1;
	u32 lockset_mask;
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);
	bool skip_main_lock;
	int tid = lwsp - cxt.lwsa;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				  !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			if (acq_writer_lim > 0)
				j = jiffies;
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */
			if (acq_writer_lim > 0) {
				j1 = jiffies;
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
			}
			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
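/*
 * With acq_writer_lim=100, for example, the writer loop above warns
 * whenever a single write-side acquisition takes more than 100 jiffies,
 * which helps pinpoint stalled or badly contended lock types.
 */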
/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}
/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
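/*
 * The resulting line looks like (numbers illustrative only):
 *	Writes: Total: 93746064 Max/Min: 153/95  Fail: 0
 * where "???" is appended when the Max/Min imbalance looks suspicious and
 * "!!!" when any duplicate-acquisition failure was recorded.
 */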
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	static cpumask_t cpumask_all;
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
		 verbose, writer_fifo);
}
// If requested, maintain call_rcu() chains to keep a grace period always
// in flight.  These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	if (!smp_load_acquire(&crcp->crc_stop)) {
		(void)start_poll_synchronize_rcu(); // Start one grace period...
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
	}
}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
	int i;

	if (call_rcu_chains <= 0)
		return 0;
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
	if (!call_rcu_chain_list)
		return -ENOMEM;
	for (i = 0; i < call_rcu_chains; i++) {
		call_rcu_chain_list[i].crc_stop = false;
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
	}
	return 0;
}

// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{
	int i;

	if (!call_rcu_chain_list)
		return;
	for (i = 0; i < call_rcu_chains; i++)
		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
	rcu_barrier();
	kfree(call_rcu_chain_list);
	call_rcu_chain_list = NULL;
}
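/*
 * The smp_store_release() in call_rcu_chain_cleanup() pairs with the
 * smp_load_acquire() in call_rcu_chain_cb(), so each chain's callback sees
 * the stop flag and declines to requeue itself; rcu_barrier() then waits
 * for any still-pending callbacks before the backing array is freed.
 */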
static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt->cur_ops.init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops.exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

	call_rcu_chain_cleanup();

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}
static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	firsterr = call_rcu_chain_init();
	if (torture_init_error(firsterr))
		goto unwind;

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/* cap nested_locks to MAX_NESTED_LOCKS */
	if (nested_locks > MAX_NESTED_LOCKS)
		nested_locks = MAX_NESTED_LOCKS;

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating its kthread first. This can be modified
	 * for very specific needs, or even let the user choose the policy, if
	 * ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
						     writer_tasks[i],
						     writer_fifo ? sched_set_fifo : NULL);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_writers))
			torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
		if (cpumask_nonempty(bind_readers))
			torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);