// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1,
	      "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	      "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	      "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags;	/* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};

static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};

/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

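/*
 * Note: the torture_lock_busted_* callbacks above deliberately provide
 * no mutual exclusion whatsoever, so concurrent writers should trip the
 * WARN_ON_ONCE(lock_is_write_held) check in lock_torture_writer().
 * Running torture_type=lock_busted therefore validates the failure
 * detection machinery of the test itself.
 */
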
static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

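/*
 * A note on the delay scheme used by the write/read delay callbacks
 * below: the probability of each delay is scaled by the number of
 * stress kthreads, e.g. a long mdelay() fires roughly once per
 * nrealwriters_stress * 2000 * longdelay_ms acquisitions, so the
 * aggregate rate of injected long delays stays roughly constant as
 * the thread count grows.
 */
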
static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

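/*
 * The _irq variants below stash the irqsave flags in cxt.cur_ops->flags
 * so that the matching unlock callback can restore them. This is safe
 * only because these are write-only torture types: exactly one kthread
 * holds the lock (and thus owns the saved flags) at any given time.
 */
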
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

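/*
 * torture_ww_mutex_lock() below exercises the wait/wound protocol:
 * the three mutexes are acquired in list order, and a -EDEADLK on any
 * of them forces the usual backoff dance: release everything already
 * held, take the contended mutex with ww_mutex_lock_slow(), move it to
 * the front of the list, and retry the remainder.
 */
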
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif	/* CONFIG_RT_MUTEXES */

static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		WRITE_ONCE(last_lock_release, jiffies);
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = true;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = false;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

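/*
 * The writer and reader kthreads above carry the actual test:
 * lock_is_write_held and lock_is_read_held are only written while the
 * torture lock is held, so observing lock_is_write_held from another
 * writer (or from a reader) means the primitive under test failed to
 * exclude, and the event is counted in n_lock_fail.
 */
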
/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

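/*
 * In the statistics line built above, "???" flags a suspiciously
 * unfair acquisition spread (max more than twice min) when CPU hotplug
 * is not running, and "!!!" flags that at least one kthread recorded a
 * locking failure.
 */
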
/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt.cur_ops->init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups,
	 * cur_ops->exit() will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = false;
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even to let the user choose
	 * the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}

	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		return -EINVAL;
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);