// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
#define ww_acquire_init_noinject(a, b) do { \
		ww_acquire_init((a), (b)); \
		(a)->deadlock_inject_countdown = ~0U; \
	} while (0)
#else
#define ww_acquire_init_noinject(a, b) ww_acquire_init((a), (b))
#endif
struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)
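/*
 * Basic mutual exclusion: a worker signals ready, waits for the go signal
 * and then contends for the mutex (by lock or trylock, per the flags) while
 * the parent still holds it. The parent fails the test if the worker manages
 * to complete while the lock is held.
 */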
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex, NULL))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	if (flags & TEST_MTX_CTX)
		ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	if (flags & TEST_MTX_CTX)
		ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}
static int test_aa(bool trylock)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;
	const char *from = trylock ? "trylock" : "lock";

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	if (!trylock) {
		ret = ww_mutex_lock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial lock failed!\n", __func__);
			goto out;
		}
	} else {
		ret = !ww_mutex_trylock(&mutex, &ctx);
		if (ret) {
			pr_err("%s: initial trylock failed!\n", __func__);
			goto out;
		}
	}

	if (ww_mutex_trylock(&mutex, NULL)) {
		pr_err("%s: trylocked itself without context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	if (ww_mutex_trylock(&mutex, &ctx)) {
		pr_err("%s: trylocked itself with context from %s!\n", __func__, from);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d from %s\n",
		       __func__, ret, from);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ww_mutex_unlock(&mutex);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve, trylock;
	int result;
};
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!abba->trylock)
		ww_mutex_lock(&abba->b_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba->b_mutex, &ctx));

	WARN_ON(READ_ONCE(abba->b_mutex.ctx) != &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}
static int test_abba(bool trylock, bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.trylock = trylock;
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init_noinject(&ctx, &ww_class);
	if (!trylock)
		ww_mutex_lock(&abba.a_mutex, &ctx);
	else
		WARN_ON(!ww_mutex_trylock(&abba.a_mutex, &ctx));

	WARN_ON(READ_ONCE(abba.a_mutex.ctx) != &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}
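/*
 * Cyclic deadlock: each of nthreads workers holds its own a_mutex and then
 * tries to take its neighbour's, closing the cycle. The ww_mutex machinery
 * must let every worker resolve the -EDEADLK it may see by backing off and
 * relocking in the slow path.
 */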
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};
static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init_noinject(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}
static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (!n)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}
static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}
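/*
 * Stress tests: several workers hammer a shared array of ww_mutexes until a
 * timeout expires, taking the locks in sorted or randomised order and
 * backing off with ww_mutex_lock_slow() whenever -EDEADLK is returned.
 */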
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

struct rnd_state rng;
DEFINE_SPINLOCK(rng_lock);
static inline u32 prandom_u32_below(u32 ceil)
{
	u32 ret;

	spin_lock(&rng_lock);
	ret = prandom_u32_state(&rng) % ceil;
	spin_unlock(&rng_lock);

	return ret;
}
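/* Return a kmalloc'ed array holding 0..count-1 in randomly shuffled order. */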
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = prandom_u32_below(n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}
static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			if (!time_after(jiffies, stress->timeout)) {
				ww_mutex_lock_slow(&locks[order[contended]], &ctx);
				goto retry;
			}
		}

		ww_acquire_fini(&ctx);
		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);

		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}
#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
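/*
 * Spread nthreads workers round-robin over the stress variants enabled in
 * 'flags'; every worker runs against the same set of nlocks mutexes until
 * its two second timeout elapses.
 */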
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}
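/* Run each self-test in turn at module load; the first failure aborts init. */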
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret, i;

	printk(KERN_INFO "Beginning ww mutex selftests\n");

	prandom_seed_state(&rng, get_random_u64());

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa(false);
	if (ret)
		return ret;

	ret = test_aa(true);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		ret = test_abba(i & 1, i & 2);
		if (ret)
			return ret;
	}

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	printk(KERN_INFO "All ww mutex selftests passed\n");

	return 0;
}
static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}
module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("API test facility for ww_mutexes");