// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)
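
/*
 * Basic mutual-exclusion check: the worker below tries to take mtx->mutex
 * (by spinning on trylock or by a blocking lock, depending on TEST_MTX_TRY)
 * while __test_mutex() already holds it.  The worker must not be able to
 * signal &mtx->done until the parent drops the lock.
 */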
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}
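
/*
 * ABBA test: the parent locks a_mutex then b_mutex while the worker locks
 * b_mutex then a_mutex.  With resolve=true both sides back off and retry on
 * -EDEADLK, so the deadlock must be resolved; with resolve=false at least
 * one side must observe -EDEADLK.
 */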
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}
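
/*
 * Cyclic deadlock test: each of nthreads workers holds its own a_mutex and
 * then tries to take the next worker's a_mutex, closing the cycle.  Workers
 * that see -EDEADLK back off via ww_mutex_lock_slow(), so every worker
 * should eventually complete without error.
 */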
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);

	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};
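
/*
 * Return a freshly allocated random permutation of 0..count-1, used by the
 * stress workers to vary the order in which they attempt the locks.
 */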
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}
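
/*
 * Stress: take all locks in one fixed random order under a single acquire
 * context; on -EDEADLK drop everything held so far, sleep-lock the contended
 * mutex and retry the whole sequence.
 */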
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};
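
/*
 * Stress: acquire the locks by walking a list; on -EDEADLK release whatever
 * is held, sleep-lock the contended mutex and move it to the head of the
 * list, which effectively restarts the iteration in a new order.
 */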
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}
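
/*
 * Stress: repeatedly lock and unlock a single randomly chosen mutex without
 * an acquire context, mixing plain mutex usage into the stress load.
 */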
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
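
/*
 * Spawn nthreads stress workers on the test workqueue, cycling through the
 * flavours selected in @flags, all hammering the same array of nlocks
 * ww_mutexes for roughly two seconds.  Each worker frees its own struct
 * stress when it finishes.
 */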
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}
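
/*
 * Module init runs every self-test once; a nonzero return fails the module
 * load, so a successful insmod means all of the tests passed.
 */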
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");