/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "selftest.h"

static struct kmem_cache *slab_fences;
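
/*
 * Mock fence used by all the selftests below: a dma_fence embedded in a
 * slab-allocated container with its own spinlock, so fences can be
 * created, signaled and released without any driver or hardware behind
 * them.
 */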
static struct mock_fence {
        struct dma_fence base;
        struct spinlock lock;
} *to_mock_fence(struct dma_fence *f) {
        return container_of(f, struct mock_fence, base);
}

static const char *mock_name(struct dma_fence *f)
{
        return "mock";
}

static void mock_fence_release(struct dma_fence *f)
{
        kmem_cache_free(slab_fences, to_mock_fence(f));
}

struct wait_cb {
        struct dma_fence_cb cb;
        struct task_struct *task;
};

static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
        wake_up_process(container_of(cb, struct wait_cb, cb)->task);
}
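
/*
 * Mock ->wait() implementation: attach a wake-up callback and sleep in
 * schedule_timeout() until the fence is signaled, the timeout expires or
 * the wait is interrupted.
 */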
static long mock_wait(struct dma_fence *f, bool intr, long timeout)
{
        const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        struct wait_cb cb = { .task = current };

        if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
                return timeout;

        while (timeout) {
                set_current_state(state);

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                        break;

                if (signal_pending_state(state, current))
                        break;

                timeout = schedule_timeout(timeout);
        }
        __set_current_state(TASK_RUNNING);

        if (!dma_fence_remove_callback(f, &cb.cb))
                return timeout;

        if (signal_pending_state(state, current))
                return -ERESTARTSYS;

        return -ETIME;
}

static const struct dma_fence_ops mock_ops = {
        .get_driver_name = mock_name,
        .get_timeline_name = mock_name,
        .wait = mock_wait,
        .release = mock_fence_release,
};
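
/* Allocate and initialise a fresh, unsignaled mock fence. */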
static struct dma_fence *mock_fence(void)
{
        struct mock_fence *f;

        f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
        if (!f)
                return NULL;

        spin_lock_init(&f->lock);
        dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

        return &f->base;
}
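
/* Smoke test: create a mock fence, signal it and drop the reference. */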
static int sanitycheck(void *arg)
{
        struct dma_fence *f;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        dma_fence_signal(f);
        dma_fence_put(f);

        return 0;
}
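
/*
 * A fence must not report being signaled before dma_fence_signal() is
 * called, must report it afterwards, and a second dma_fence_signal() on
 * the same fence must fail.
 */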
static int test_signaling(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        if (dma_fence_is_signaled(f)) {
                pr_err("Fence unexpectedly signaled on creation\n");
                goto err_free;
        }

        if (dma_fence_signal(f)) {
                pr_err("Fence reported being already signaled\n");
                goto err_free;
        }

        if (!dma_fence_is_signaled(f)) {
                pr_err("Fence not reporting signaled\n");
                goto err_free;
        }

        if (!dma_fence_signal(f)) {
                pr_err("Fence reported not being already signaled\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}
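
/* Helper callback that merely records that it has been invoked. */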
struct simple_cb {
        struct dma_fence_cb cb;
        bool seen;
};

static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
        smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}
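
/* A callback added to an unsignaled fence runs once the fence signals. */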
static int test_add_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Failed to add callback, fence already signaled!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (!cb.seen) {
                pr_err("Callback failed!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}
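
/*
 * Adding a callback to an already signaled fence must fail, and the
 * rejected callback must never be invoked.
 */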
static int test_late_add_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        dma_fence_signal(f);

        if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Added callback, but fence was already signaled!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (cb.seen) {
                pr_err("Callback called after failed attachment!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}
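
/* A callback removed before the fence signals must not be invoked. */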
static int test_rm_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Failed to add callback, fence already signaled!\n");
                goto err_free;
        }

        if (!dma_fence_remove_callback(f, &cb.cb)) {
                pr_err("Failed to remove callback!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (cb.seen) {
                pr_err("Callback still signaled after removal!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}
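
/* Removing a callback that has already been executed must fail. */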
static int test_late_rm_callback(void *arg)
{
        struct simple_cb cb = {};
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
                pr_err("Failed to add callback, fence already signaled!\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (!cb.seen) {
                pr_err("Callback failed!\n");
                goto err_free;
        }

        if (dma_fence_remove_callback(f, &cb.cb)) {
                pr_err("Callback removal succeeded after being executed!\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}
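
/* dma_fence_get_status() must be zero until the fence is signaled. */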
static int test_status(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        if (dma_fence_get_status(f)) {
                pr_err("Fence unexpectedly has signaled status on creation\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (!dma_fence_get_status(f)) {
                pr_err("Fence not reporting signaled status\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}
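
/*
 * An error set with dma_fence_set_error() must not show up in
 * dma_fence_get_status() until the fence is signaled, and must be
 * reported once it is.
 */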
static int test_error(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        dma_fence_set_error(f, -EIO);

        if (dma_fence_get_status(f)) {
                pr_err("Fence unexpectedly has error status before signal\n");
                goto err_free;
        }

        dma_fence_signal(f);
        if (dma_fence_get_status(f) != -EIO) {
                pr_err("Fence not reporting error status, got %d\n",
                       dma_fence_get_status(f));
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_put(f);
        return err;
}
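
/*
 * A zero-timeout wait must return -ETIME while the fence is unsignaled
 * and 0 once it has been signaled.
 */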
static int test_wait(void *arg)
{
        struct dma_fence *f;
        int err = -EINVAL;

        f = mock_fence();
        if (!f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(f);

        if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
                pr_err("Wait reported complete before being signaled\n");
                goto err_free;
        }

        dma_fence_signal(f);

        if (dma_fence_wait_timeout(f, false, 0) != 0) {
                pr_err("Wait reported incomplete after being signaled\n");
                goto err_free;
        }

        err = 0;
err_free:
        dma_fence_signal(f);
        dma_fence_put(f);
        return err;
}
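
/*
 * Timer-driven wait: an on-stack timer signals the fence one jiffy after
 * being armed. The wait must time out while no timer is armed and must
 * complete once the timer has fired.
 */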
struct wait_timer {
        struct timer_list timer;
        struct dma_fence *f;
};

static void wait_timer(struct timer_list *timer)
{
        struct wait_timer *wt = from_timer(wt, timer, timer);

        dma_fence_signal(wt->f);
}

static int test_wait_timeout(void *arg)
{
        struct wait_timer wt;
        int err = -EINVAL;

        timer_setup_on_stack(&wt.timer, wait_timer, 0);

        wt.f = mock_fence();
        if (!wt.f)
                return -ENOMEM;

        dma_fence_enable_sw_signaling(wt.f);

        if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
                pr_err("Wait reported complete before being signaled\n");
                goto err_free;
        }

        mod_timer(&wt.timer, jiffies + 1);

        if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
                if (timer_pending(&wt.timer)) {
                        pr_notice("Timer did not fire within the jiffy!\n");
                        err = 0; /* not our fault! */
                } else {
                        pr_err("Wait reported incomplete after timeout\n");
                }
                goto err_free;
        }

        err = 0;
err_free:
        del_timer_sync(&wt.timer);
        destroy_timer_on_stack(&wt.timer);
        dma_fence_signal(wt.f);
        dma_fence_put(wt.f);
        return err;
}
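
/* Every fence returned by dma_fence_get_stub() must already be signaled. */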
static int test_stub(void *arg)
{
        struct dma_fence *f[64];
        int err = -EINVAL;
        int i;

        for (i = 0; i < ARRAY_SIZE(f); i++) {
                f[i] = dma_fence_get_stub();
                if (!dma_fence_is_signaled(f[i])) {
                        pr_err("Obtained unsignaled stub fence!\n");
                        goto err;
                }
        }

        err = 0;
err:
        while (i--)
                dma_fence_put(f[i]);
        return err;
}

/* Now off to the races! */

struct race_thread {
        struct dma_fence __rcu **fences;
        struct task_struct *task;
        bool before;
        int id;
};
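
/*
 * dma_fence_signal() runs the callbacks under the fence lock, so taking
 * and releasing that lock here waits for any callback that is currently
 * executing to finish.
 */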
static void __wait_for_callbacks(struct dma_fence *f)
{
        spin_lock_irq(f->lock);
        spin_unlock_irq(f->lock);
}
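
/*
 * Each racing thread publishes its own mock fence through an RCU slot,
 * grabs its partner's fence with dma_fence_get_rcu_safe(), adds a callback
 * to it, and signals its own fence either before or after doing so. The
 * callback on the partner's fence must eventually be seen.
 */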
static int thread_signal_callback(void *arg)
{
        const struct race_thread *t = arg;
        unsigned long pass = 0;
        unsigned long miss = 0;
        int err = 0;

        while (!err && !kthread_should_stop()) {
                struct dma_fence *f1, *f2;
                struct simple_cb cb;

                f1 = mock_fence();
                if (!f1) {
                        err = -ENOMEM;
                        break;
                }

                dma_fence_enable_sw_signaling(f1);

                rcu_assign_pointer(t->fences[t->id], f1);
                smp_wmb();

                rcu_read_lock();
                do {
                        f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
                } while (!f2 && !kthread_should_stop());
                rcu_read_unlock();

                if (t->before)
                        dma_fence_signal(f1);

                smp_store_mb(cb.seen, false);
                if (!f2 ||
                    dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
                        miss++;
                        cb.seen = true;
                }

                if (!t->before)
                        dma_fence_signal(f1);

                if (!cb.seen) {
                        dma_fence_wait(f2, false);
                        __wait_for_callbacks(f2);
                }

                if (!READ_ONCE(cb.seen)) {
                        pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
                               t->id, pass, miss,
                               t->before ? "before" : "after",
                               dma_fence_is_signaled(f2) ? "yes" : "no");
                        err = -EINVAL;
                }

                dma_fence_put(f2);

                rcu_assign_pointer(t->fences[t->id], NULL);
                smp_wmb();

                dma_fence_put(f1);

                pass++;
        }

        pr_info("%s[%d] completed %lu passes, %lu misses\n",
                __func__, t->id, pass, miss);
        return err;
}
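
/*
 * Run the race twice with two threads each: once signaling after the
 * callback is added (pass 0) and once signaling before (pass 1),
 * propagating any error either thread reports.
 */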
static int race_signal_callback(void *arg)
{
        struct dma_fence __rcu *f[2] = {};
        int ret = 0;
        int pass;

        for (pass = 0; !ret && pass <= 1; pass++) {
                struct race_thread t[2];
                int i;

                for (i = 0; i < ARRAY_SIZE(t); i++) {
                        t[i].fences = f;
                        t[i].id = i;
                        t[i].before = pass;
                        t[i].task = kthread_run(thread_signal_callback, &t[i],
                                                "dma-fence:%d", i);
                        if (IS_ERR(t[i].task)) {
                                ret = PTR_ERR(t[i].task);
                                while (--i >= 0)
                                        kthread_stop_put(t[i].task);
                                return ret;
                        }
                        get_task_struct(t[i].task);
                }

                msleep(50);

                for (i = 0; i < ARRAY_SIZE(t); i++) {
                        int err;

                        err = kthread_stop_put(t[i].task);
                        if (err && !ret)
                                ret = err;
                }
        }

        return ret;
}
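
/*
 * Entry point for the dma-fence selftests: create the mock fence slab,
 * run all subtests above and tear the slab down again.
 */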
int dma_fence(void)
{
        static const struct subtest tests[] = {
                SUBTEST(sanitycheck),
                SUBTEST(test_signaling),
                SUBTEST(test_add_callback),
                SUBTEST(test_late_add_callback),
                SUBTEST(test_rm_callback),
                SUBTEST(test_late_rm_callback),
                SUBTEST(test_status),
                SUBTEST(test_error),
                SUBTEST(test_wait),
                SUBTEST(test_wait_timeout),
                SUBTEST(test_stub),
                SUBTEST(race_signal_callback),
        };
        int ret;

        pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

        slab_fences = KMEM_CACHE(mock_fence,
                                 SLAB_TYPESAFE_BY_RCU |
                                 SLAB_HWCACHE_ALIGN);
        if (!slab_fences)
                return -ENOMEM;

        ret = subtests(tests, NULL);

        kmem_cache_destroy(slab_fences);

        return ret;
}