kernel/locking/test-ww_mutex.c
/*
 * Module-based API test facility for ww_mutexes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

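/*
 * Basic mutual exclusion: a worker (test_mutex_work) and the main thread
 * compete for a single ww_mutex.  The TEST_MTX_* flags select whether the
 * worker spins on trylock or blocks in ww_mutex_lock(), whether the main
 * thread busy-waits or sleeps while holding the lock, and whether it locks
 * with or without an acquire context.  The worker must not be able to
 * complete its critical section while the main thread still holds the lock.
 */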
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

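/*
 * AA deadlock: locking the same ww_mutex twice within one acquire context
 * must be detected.  A trylock on an already-held mutex must fail, and a
 * second ww_mutex_lock() must return -EALREADY instead of deadlocking.
 */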
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}

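/*
 * ABBA deadlock: the worker takes b_mutex then a_mutex while the main
 * thread takes a_mutex then b_mutex.  With resolve unset, one side must
 * observe -EDEADLK; with resolve set, the loser backs off with
 * ww_mutex_lock_slow() and both sides must eventually acquire both locks.
 */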
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};

static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

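/*
 * Cyclic deadlock: N workers are arranged in a ring, each holding its own
 * a_mutex and then trying to take its neighbour's.  Every worker is
 * expected to resolve the resulting cycle by backing off on -EDEADLK and
 * reacquiring with ww_mutex_lock_slow().
 */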
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err;
}

static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

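/*
 * Stress tests: hammer a shared array of ww_mutexes from several workers,
 * each running stress->nloops iterations of lock/unlock with a small
 * dummy load in between.
 */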
struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	int nlocks;
	int nloops;
};

static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

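/*
 * Acquire every lock in a randomised (but fixed per worker) order, backing
 * off and restarting from the contended lock whenever ww_mutex_lock()
 * reports -EDEADLK.
 */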
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	ww_acquire_init(&ctx, &ww_class);

	do {
		int contended = -1;
		int n, err;

retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (--stress->nloops);

	ww_acquire_fini(&ctx);

	kfree(order);
	kfree(stress);
}

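/*
 * Acquire the locks in list order, but on -EDEADLK back off and move the
 * contended lock to the head of the list, so the acquisition order keeps
 * changing from one iteration to the next.
 */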
struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	ww_acquire_init(&ctx, &ww_class);

	do {
		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);
	} while (--stress->nloops);

	ww_acquire_fini(&ctx);

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
	kfree(stress);
}

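/*
 * Repeatedly lock and unlock a single randomly chosen mutex without an
 * acquire context, mixing plain mutex-style usage into the stress load.
 */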
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (--stress->nloops);

	kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

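/*
 * Queue nthreads stress workers on the shared lock array, cycling through
 * the worker types selected by @flags.
 */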
static int stress(int nlocks, int nthreads, int nloops, unsigned int flags)
{
	struct ww_mutex *locks;
	int n;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = kmalloc(sizeof(*stress), GFP_KERNEL);
		if (!stress)
			break;

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->nloops = nloops;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(locks);

	return 0;
}

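/*
 * Run the full suite at module load: basic mutual exclusion, AA and ABBA
 * deadlock detection, cyclic deadlock resolution, then the stress tests.
 */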
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, 1<<10, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, 1<<10, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");