/*
 * Module-based API test facility for ww_mutexes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */
#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;
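
/*
 * All of the tests below initialise their ww_mutexes against the single
 * ww_class above, so they share one acquire-context ordering domain.  The
 * unbound workqueue "wq" (allocated in test_ww_mutex_init()) runs the worker
 * side of the cycle and stress tests.
 */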
struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)
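
/*
 * Basic mutual-exclusion test: the parent holds mtx->mutex while the worker
 * below tries to take it.  The flags select how the worker acquires the lock
 * (trylock vs. blocking lock), how the parent waits (spinning vs. a timed
 * wait) and whether the parent locks with an acquire context.
 */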
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		/* Busy-wait on trylock until the parent drops the mutex. */
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		/* The worker signalled "done" while we still held the lock. */
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}
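
/*
 * AA test: within a single acquire context, re-locking a ww_mutex we already
 * hold must return -EALREADY rather than deadlocking, and a trylock on it
 * must fail.
 */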
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};
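
/*
 * ABBA test: the worker locks b then a while the parent locks a then b, so
 * one side must see -EDEADLK.  When "resolve" is set, that side backs off
 * using the standard wound/wait pattern, roughly:
 *
 *	err = ww_mutex_lock(&contended, &ctx);
 *	if (err == -EDEADLK) {
 *		ww_mutex_unlock(&held);
 *		ww_mutex_lock_slow(&contended, &ctx);
 *		err = ww_mutex_lock(&held, &ctx);
 *	}
 */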
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};
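
/*
 * Cycle test: worker n locks its own a_mutex and then tries to take the next
 * worker's a_mutex (the last worker wraps around to the first), forming an
 * N-way lock cycle that the workers must resolve via the -EDEADLK backoff
 * path.
 */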
static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err;
}
static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);

	return ret;
}
static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};
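
/*
 * Stress framework: each worker receives a kmalloc'ed struct stress
 * describing the shared lock array and a jiffies deadline, hammers the locks
 * until the deadline passes, and then frees its own struct stress.
 */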
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	/* Fisher-Yates shuffle of the index array. */
	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}
static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}
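
/*
 * In-order stress: lock every mutex in one fixed random order under a single
 * acquire context, backing off with ww_mutex_lock_slow() and retrying on
 * -EDEADLK, until the timeout expires.
 */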
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
	kfree(stress);
}
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};
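
/*
 * Reorder stress: keep the locks on a list and, whenever a lock returns
 * -EDEADLK, acquire it with ww_mutex_lock_slow() and move it to the front of
 * the list so the next pass takes the contended lock first.
 */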
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}
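
/*
 * Single-lock stress: repeatedly lock and unlock one randomly chosen mutex
 * without an acquire context, just to generate plain contention.
 */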
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));

	kfree(stress);
}
#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
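
/*
 * Spawn nthreads stress workers against a shared array of nlocks mutexes,
 * round-robining over the stress functions selected by @flags, and let them
 * run for roughly two seconds before tearing the locks down again.
 */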
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}
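
/*
 * Module init: run each test in turn and refuse to load on the first
 * failure, so a successful insmod means every test passed.
 */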
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}
module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");