/* drivers/dma-buf/st-dma-fence.c — self-tests for the dma-fence primitives */
1 /* SPDX-License-Identifier: MIT */
3 /*
4 * Copyright © 2019 Intel Corporation
5 */
7 #include <linux/delay.h>
8 #include <linux/dma-fence.h>
9 #include <linux/kernel.h>
10 #include <linux/kthread.h>
11 #include <linux/sched/signal.h>
12 #include <linux/slab.h>
13 #include <linux/spinlock.h>
15 #include "selftest.h"
17 static struct kmem_cache *slab_fences;
19 static struct mock_fence {
20 struct dma_fence base;
21 struct spinlock lock;
22 } *to_mock_fence(struct dma_fence *f) {
23 return container_of(f, struct mock_fence, base);
/* Used for both .get_driver_name and .get_timeline_name. */
static const char *mock_name(struct dma_fence *f)
{
	return "mock";
}
31 static void mock_fence_release(struct dma_fence *f)
33 kmem_cache_free(slab_fences, to_mock_fence(f));
36 struct wait_cb {
37 struct dma_fence_cb cb;
38 struct task_struct *task;
41 static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
43 wake_up_process(container_of(cb, struct wait_cb, cb)->task);
46 static long mock_wait(struct dma_fence *f, bool intr, long timeout)
48 const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
49 struct wait_cb cb = { .task = current };
51 if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
52 return timeout;
54 while (timeout) {
55 set_current_state(state);
57 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
58 break;
60 if (signal_pending_state(state, current))
61 break;
63 timeout = schedule_timeout(timeout);
65 __set_current_state(TASK_RUNNING);
67 if (!dma_fence_remove_callback(f, &cb.cb))
68 return timeout;
70 if (signal_pending_state(state, current))
71 return -ERESTARTSYS;
73 return -ETIME;
76 static const struct dma_fence_ops mock_ops = {
77 .get_driver_name = mock_name,
78 .get_timeline_name = mock_name,
79 .wait = mock_wait,
80 .release = mock_fence_release,
83 static struct dma_fence *mock_fence(void)
85 struct mock_fence *f;
87 f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
88 if (!f)
89 return NULL;
91 spin_lock_init(&f->lock);
92 dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
94 return &f->base;
97 static int sanitycheck(void *arg)
99 struct dma_fence *f;
101 f = mock_fence();
102 if (!f)
103 return -ENOMEM;
105 dma_fence_signal(f);
106 dma_fence_put(f);
108 return 0;
111 static int test_signaling(void *arg)
113 struct dma_fence *f;
114 int err = -EINVAL;
116 f = mock_fence();
117 if (!f)
118 return -ENOMEM;
120 if (dma_fence_is_signaled(f)) {
121 pr_err("Fence unexpectedly signaled on creation\n");
122 goto err_free;
125 if (dma_fence_signal(f)) {
126 pr_err("Fence reported being already signaled\n");
127 goto err_free;
130 if (!dma_fence_is_signaled(f)) {
131 pr_err("Fence not reporting signaled\n");
132 goto err_free;
135 if (!dma_fence_signal(f)) {
136 pr_err("Fence reported not being already signaled\n");
137 goto err_free;
140 err = 0;
141 err_free:
142 dma_fence_put(f);
143 return err;
146 struct simple_cb {
147 struct dma_fence_cb cb;
148 bool seen;
151 static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
153 smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
156 static int test_add_callback(void *arg)
158 struct simple_cb cb = {};
159 struct dma_fence *f;
160 int err = -EINVAL;
162 f = mock_fence();
163 if (!f)
164 return -ENOMEM;
166 if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
167 pr_err("Failed to add callback, fence already signaled!\n");
168 goto err_free;
171 dma_fence_signal(f);
172 if (!cb.seen) {
173 pr_err("Callback failed!\n");
174 goto err_free;
177 err = 0;
178 err_free:
179 dma_fence_put(f);
180 return err;
183 static int test_late_add_callback(void *arg)
185 struct simple_cb cb = {};
186 struct dma_fence *f;
187 int err = -EINVAL;
189 f = mock_fence();
190 if (!f)
191 return -ENOMEM;
193 dma_fence_signal(f);
195 if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
196 pr_err("Added callback, but fence was already signaled!\n");
197 goto err_free;
200 dma_fence_signal(f);
201 if (cb.seen) {
202 pr_err("Callback called after failed attachment !\n");
203 goto err_free;
206 err = 0;
207 err_free:
208 dma_fence_put(f);
209 return err;
212 static int test_rm_callback(void *arg)
214 struct simple_cb cb = {};
215 struct dma_fence *f;
216 int err = -EINVAL;
218 f = mock_fence();
219 if (!f)
220 return -ENOMEM;
222 if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
223 pr_err("Failed to add callback, fence already signaled!\n");
224 goto err_free;
227 if (!dma_fence_remove_callback(f, &cb.cb)) {
228 pr_err("Failed to remove callback!\n");
229 goto err_free;
232 dma_fence_signal(f);
233 if (cb.seen) {
234 pr_err("Callback still signaled after removal!\n");
235 goto err_free;
238 err = 0;
239 err_free:
240 dma_fence_put(f);
241 return err;
244 static int test_late_rm_callback(void *arg)
246 struct simple_cb cb = {};
247 struct dma_fence *f;
248 int err = -EINVAL;
250 f = mock_fence();
251 if (!f)
252 return -ENOMEM;
254 if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
255 pr_err("Failed to add callback, fence already signaled!\n");
256 goto err_free;
259 dma_fence_signal(f);
260 if (!cb.seen) {
261 pr_err("Callback failed!\n");
262 goto err_free;
265 if (dma_fence_remove_callback(f, &cb.cb)) {
266 pr_err("Callback removal succeed after being executed!\n");
267 goto err_free;
270 err = 0;
271 err_free:
272 dma_fence_put(f);
273 return err;
276 static int test_status(void *arg)
278 struct dma_fence *f;
279 int err = -EINVAL;
281 f = mock_fence();
282 if (!f)
283 return -ENOMEM;
285 if (dma_fence_get_status(f)) {
286 pr_err("Fence unexpectedly has signaled status on creation\n");
287 goto err_free;
290 dma_fence_signal(f);
291 if (!dma_fence_get_status(f)) {
292 pr_err("Fence not reporting signaled status\n");
293 goto err_free;
296 err = 0;
297 err_free:
298 dma_fence_put(f);
299 return err;
302 static int test_error(void *arg)
304 struct dma_fence *f;
305 int err = -EINVAL;
307 f = mock_fence();
308 if (!f)
309 return -ENOMEM;
311 dma_fence_set_error(f, -EIO);
313 if (dma_fence_get_status(f)) {
314 pr_err("Fence unexpectedly has error status before signal\n");
315 goto err_free;
318 dma_fence_signal(f);
319 if (dma_fence_get_status(f) != -EIO) {
320 pr_err("Fence not reporting error status, got %d\n",
321 dma_fence_get_status(f));
322 goto err_free;
325 err = 0;
326 err_free:
327 dma_fence_put(f);
328 return err;
331 static int test_wait(void *arg)
333 struct dma_fence *f;
334 int err = -EINVAL;
336 f = mock_fence();
337 if (!f)
338 return -ENOMEM;
340 if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
341 pr_err("Wait reported complete before being signaled\n");
342 goto err_free;
345 dma_fence_signal(f);
347 if (dma_fence_wait_timeout(f, false, 0) != 0) {
348 pr_err("Wait reported incomplete after being signaled\n");
349 goto err_free;
352 err = 0;
353 err_free:
354 dma_fence_signal(f);
355 dma_fence_put(f);
356 return err;
359 struct wait_timer {
360 struct timer_list timer;
361 struct dma_fence *f;
364 static void wait_timer(struct timer_list *timer)
366 struct wait_timer *wt = from_timer(wt, timer, timer);
368 dma_fence_signal(wt->f);
371 static int test_wait_timeout(void *arg)
373 struct wait_timer wt;
374 int err = -EINVAL;
376 timer_setup_on_stack(&wt.timer, wait_timer, 0);
378 wt.f = mock_fence();
379 if (!wt.f)
380 return -ENOMEM;
382 if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
383 pr_err("Wait reported complete before being signaled\n");
384 goto err_free;
387 mod_timer(&wt.timer, jiffies + 1);
389 if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
390 if (timer_pending(&wt.timer)) {
391 pr_notice("Timer did not fire within the jiffie!\n");
392 err = 0; /* not our fault! */
393 } else {
394 pr_err("Wait reported incomplete after timeout\n");
396 goto err_free;
399 err = 0;
400 err_free:
401 del_timer_sync(&wt.timer);
402 destroy_timer_on_stack(&wt.timer);
403 dma_fence_signal(wt.f);
404 dma_fence_put(wt.f);
405 return err;
408 static int test_stub(void *arg)
410 struct dma_fence *f[64];
411 int err = -EINVAL;
412 int i;
414 for (i = 0; i < ARRAY_SIZE(f); i++) {
415 f[i] = dma_fence_get_stub();
416 if (!dma_fence_is_signaled(f[i])) {
417 pr_err("Obtained unsignaled stub fence!\n");
418 goto err;
422 err = 0;
423 err:
424 while (i--)
425 dma_fence_put(f[i]);
426 return err;
429 /* Now off to the races! */
431 struct race_thread {
432 struct dma_fence __rcu **fences;
433 struct task_struct *task;
434 bool before;
435 int id;
438 static void __wait_for_callbacks(struct dma_fence *f)
440 spin_lock_irq(f->lock);
441 spin_unlock_irq(f->lock);
444 static int thread_signal_callback(void *arg)
446 const struct race_thread *t = arg;
447 unsigned long pass = 0;
448 unsigned long miss = 0;
449 int err = 0;
451 while (!err && !kthread_should_stop()) {
452 struct dma_fence *f1, *f2;
453 struct simple_cb cb;
455 f1 = mock_fence();
456 if (!f1) {
457 err = -ENOMEM;
458 break;
461 rcu_assign_pointer(t->fences[t->id], f1);
462 smp_wmb();
464 rcu_read_lock();
465 do {
466 f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
467 } while (!f2 && !kthread_should_stop());
468 rcu_read_unlock();
470 if (t->before)
471 dma_fence_signal(f1);
473 smp_store_mb(cb.seen, false);
474 if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
475 miss++, cb.seen = true;
477 if (!t->before)
478 dma_fence_signal(f1);
480 if (!cb.seen) {
481 dma_fence_wait(f2, false);
482 __wait_for_callbacks(f2);
485 if (!READ_ONCE(cb.seen)) {
486 pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
487 t->id, pass, miss,
488 t->before ? "before" : "after",
489 dma_fence_is_signaled(f2) ? "yes" : "no");
490 err = -EINVAL;
493 dma_fence_put(f2);
495 rcu_assign_pointer(t->fences[t->id], NULL);
496 smp_wmb();
498 dma_fence_put(f1);
500 pass++;
503 pr_info("%s[%d] completed %lu passes, %lu misses\n",
504 __func__, t->id, pass, miss);
505 return err;
508 static int race_signal_callback(void *arg)
510 struct dma_fence __rcu *f[2] = {};
511 int ret = 0;
512 int pass;
514 for (pass = 0; !ret && pass <= 1; pass++) {
515 struct race_thread t[2];
516 int i;
518 for (i = 0; i < ARRAY_SIZE(t); i++) {
519 t[i].fences = f;
520 t[i].id = i;
521 t[i].before = pass;
522 t[i].task = kthread_run(thread_signal_callback, &t[i],
523 "dma-fence:%d", i);
524 get_task_struct(t[i].task);
527 msleep(50);
529 for (i = 0; i < ARRAY_SIZE(t); i++) {
530 int err;
532 err = kthread_stop(t[i].task);
533 if (err && !ret)
534 ret = err;
536 put_task_struct(t[i].task);
540 return ret;
543 int dma_fence(void)
545 static const struct subtest tests[] = {
546 SUBTEST(sanitycheck),
547 SUBTEST(test_signaling),
548 SUBTEST(test_add_callback),
549 SUBTEST(test_late_add_callback),
550 SUBTEST(test_rm_callback),
551 SUBTEST(test_late_rm_callback),
552 SUBTEST(test_status),
553 SUBTEST(test_error),
554 SUBTEST(test_wait),
555 SUBTEST(test_wait_timeout),
556 SUBTEST(test_stub),
557 SUBTEST(race_signal_callback),
559 int ret;
561 pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));
563 slab_fences = KMEM_CACHE(mock_fence,
564 SLAB_TYPESAFE_BY_RCU |
565 SLAB_HWCACHE_ALIGN);
566 if (!slab_fences)
567 return -ENOMEM;
569 ret = subtests(tests, NULL);
571 kmem_cache_destroy(slab_fences);
573 return ret;