/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);
/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
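
/*
 * cxt is the single global test context: every writer/reader kthread
 * consults cxt.cur_ops for the lock type under test, and the per-thread
 * statistics arrays hang off of it.  The real thread counts are filled
 * in by lock_torture_init() once the module parameters are known.
 */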
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}
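
/*
 * Note that the probability of hitting the long delay or the preemption
 * point above is divided by cxt.nrealwriters_stress, so the aggregate
 * rate of these events stays roughly constant no matter how many writer
 * kthreads are running.  The same scaling recurs in the other *_delay()
 * functions below.
 */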

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}
static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
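
/*
 * The saved interrupt state has to survive from writelock() to
 * writeunlock(), but the ops interface passes no arguments between
 * them, so the flags are parked in cxt.cur_ops->flags.  This is safe
 * only because at most one writer holds the lock at any given time.
 */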

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
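
/*
 * Readers use a 10us short delay against the writers' 2us, lengthening
 * read-side critical sections so that concurrent readers are more
 * likely to actually overlap under the lock.
 */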

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}
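
/*
 * The loop above is the standard wait/wound backoff dance: on -EDEADLK,
 * every lock already held is dropped (the reverse walk), the contended
 * lock is then taken with ww_mutex_lock_slow() so this context waits
 * its turn, and that lock is moved to the front of the list before the
 * whole sequence is retried.
 */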

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}
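
/*
 * sched_setscheduler_nocheck() is used rather than sched_setscheduler()
 * because this runs from a kernel thread, where the usual permission
 * checks are unnecessary.  Randomly boosting some writers to RT
 * priority is what exercises the rtmutex priority-inheritance paths
 * described in the comments above.
 */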

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
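
/*
 * The (torture_random() & 0xfffff) == 0 check above fires roughly once
 * per 2^20 iterations, briefly putting the kthread to sleep so that the
 * torture threads do not settle into lockstep with one another.
 */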

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = 0;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
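
/*
 * In the resulting stats line, "???" flags a suspicious imbalance (some
 * thread acquired the lock less than half as often as another, hinting
 * at starvation or unfairness), and "!!!" flags that at least one
 * acquisition found the lock already held when it should not have been.
 */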

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = 0;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = 0;
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose
	 * the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);