/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "selftest.h"
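
/*
 * Minimal mock fence, backed by its own slab cache, so the tests below can
 * create and destroy standalone struct dma_fence instances at will.
 */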
static struct kmem_cache *slab_fences;

static struct mock_fence {
	struct dma_fence base;
	spinlock_t lock;
} *to_mock_fence(struct dma_fence *f) {
	return container_of(f, struct mock_fence, base);
}
static const char *mock_name(struct dma_fence *f)
{
	return "mock";
}

static void mock_fence_release(struct dma_fence *f)
{
	kmem_cache_free(slab_fences, to_mock_fence(f));
}
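
/* Per-waiter state for mock_wait(): wake the sleeping task from the callback. */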
struct wait_cb {
	struct dma_fence_cb cb;
	struct task_struct *task;
};

static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
}
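
/*
 * Backs the .wait fence op: sleep until the fence signals, a signal is
 * pending for the task, or the timeout expires.
 */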
static long mock_wait(struct dma_fence *f, bool intr, long timeout)
{
	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct wait_cb cb = { .task = current };

	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
		return timeout;

	while (timeout > 0) {
		set_current_state(state);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (signal_pending_state(state, current))
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	if (!dma_fence_remove_callback(f, &cb.cb))
		return timeout;

	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	return -ETIME;
}
static const struct dma_fence_ops mock_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.wait = mock_wait,
	.release = mock_fence_release,
};
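
/* Allocate and initialise a fresh, unsignaled mock fence. */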
static struct dma_fence *mock_fence(void)
{
	struct mock_fence *f;

	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
	if (!f)
		return NULL;

	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

	return &f->base;
}
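
/* Smoke test: create a fence, signal it and drop the reference. */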
static int sanitycheck(void *arg)
{
	struct dma_fence *f;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);
	dma_fence_put(f);

	return 0;
}
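
/*
 * A new fence must start out unsignaled, the first dma_fence_signal() must
 * succeed, and any repeat signaling must be reported as already signaled.
 */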
static int test_signaling(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_is_signaled(f)) {
		pr_err("Fence unexpectedly signaled on creation\n");
		goto err_free;
	}

	if (dma_fence_signal(f)) {
		pr_err("Fence reported being already signaled\n");
		goto err_free;
	}

	if (!dma_fence_is_signaled(f)) {
		pr_err("Fence not reporting signaled\n");
		goto err_free;
	}

	if (!dma_fence_signal(f)) {
		pr_err("Fence reported not being already signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}
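
/* Trivial callback that records, with a memory barrier, that it has run. */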
struct simple_cb {
	struct dma_fence_cb cb;
	bool seen;
};

static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}
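
/* A callback added before signaling must fire when the fence is signaled. */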
static int test_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}
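
/* Adding a callback to an already signaled fence must fail and never fire. */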
static int test_late_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);

	if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Added callback, but fence was already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback called after failed attachment!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}
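
/* A callback removed before signaling must not fire. */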
static int test_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	if (!dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Failed to remove callback!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback still signaled after removal!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}
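
/* Removing a callback after it has already executed must fail. */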
static int test_late_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	if (dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Callback removal succeeded after being executed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}
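
/* dma_fence_get_status() reports 0 while pending and non-zero once signaled. */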
static int test_status(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has signaled status on creation\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!dma_fence_get_status(f)) {
		pr_err("Fence not reporting signaled status\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}
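
/* An error set with dma_fence_set_error() only shows up in the status after signaling. */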
static int test_error(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_set_error(f, -EIO);

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has error status before signal\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (dma_fence_get_status(f) != -EIO) {
		pr_err("Fence not reporting error status, got %d\n",
		       dma_fence_get_status(f));
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}
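
/* A zero-timeout wait must time out while pending and complete once signaled. */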
static int test_wait(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	dma_fence_signal(f);

	if (dma_fence_wait_timeout(f, false, 0) != 0) {
		pr_err("Wait reported incomplete after being signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_signal(f);
	dma_fence_put(f);
	return err;
}
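
/* Signal the fence from a timer so a blocking wait has something to wake it. */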
struct wait_timer {
	struct timer_list timer;
	struct dma_fence *f;
};

static void wait_timer(struct timer_list *timer)
{
	struct wait_timer *wt = from_timer(wt, timer, timer);

	dma_fence_signal(wt->f);
}
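
/* A short wait must expire unsignaled, then complete once the timer fires. */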
static int test_wait_timeout(void *arg)
{
	struct wait_timer wt;
	int err = -EINVAL;

	timer_setup_on_stack(&wt.timer, wait_timer, 0);

	wt.f = mock_fence();
	if (!wt.f)
		return -ENOMEM;

	if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	mod_timer(&wt.timer, jiffies + 1);

	if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
		if (timer_pending(&wt.timer)) {
			pr_notice("Timer did not fire within the jiffie!\n");
			err = 0; /* not our fault! */
		} else {
			pr_err("Wait reported incomplete after timeout\n");
		}
		goto err_free;
	}

	err = 0;
err_free:
	del_timer_sync(&wt.timer);
	destroy_timer_on_stack(&wt.timer);
	dma_fence_signal(wt.f);
	dma_fence_put(wt.f);
	return err;
}
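
/* Every stub fence handed out by the core must already be signaled. */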
static int test_stub(void *arg)
{
	struct dma_fence *f[64];
	int err = -EINVAL;
	int i;

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		f[i] = dma_fence_get_stub();
		if (!dma_fence_is_signaled(f[i])) {
			pr_err("Obtained unsignaled stub fence!\n");
			goto err;
		}
	}

	err = 0;
err:
	while (i--)
		dma_fence_put(f[i]);
	return err;
}
/* Now off to the races! */

struct race_thread {
	struct dma_fence __rcu **fences;
	struct task_struct *task;
	bool before;
	int id;
};

static void __wait_for_callbacks(struct dma_fence *f)
{
	/* Callbacks run under f->lock, so a lock/unlock cycle flushes them. */
	spin_lock_irq(f->lock);
	spin_unlock_irq(f->lock);
}
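
/*
 * Two threads race: each publishes its own fence through an RCU-protected
 * slot, grabs the other thread's fence, attaches a callback and signals its
 * own fence either before or after the attach. Any callback that was
 * successfully added must be observed to have run.
 */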
static int thread_signal_callback(void *arg)
{
	const struct race_thread *t = arg;
	unsigned long pass = 0;
	unsigned long miss = 0;
	int err = 0;

	while (!err && !kthread_should_stop()) {
		struct dma_fence *f1, *f2;
		struct simple_cb cb;

		f1 = mock_fence();
		if (!f1) {
			err = -ENOMEM;
			break;
		}

		rcu_assign_pointer(t->fences[t->id], f1);
		smp_wmb();

		rcu_read_lock();
		do {
			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
		} while (!f2 && !kthread_should_stop());
		rcu_read_unlock();

		if (t->before)
			dma_fence_signal(f1);

		smp_store_mb(cb.seen, false);
		if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
			miss++, cb.seen = true;

		if (!t->before)
			dma_fence_signal(f1);

		if (!cb.seen) {
			dma_fence_wait(f2, false);
			__wait_for_callbacks(f2);
		}

		if (!READ_ONCE(cb.seen)) {
			pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
			       t->id, pass, miss,
			       t->before ? "before" : "after",
			       dma_fence_is_signaled(f2) ? "yes" : "no");
			err = -EINVAL;
		}

		dma_fence_put(f2);

		rcu_assign_pointer(t->fences[t->id], NULL);
		smp_wmb();

		dma_fence_put(f1);

		pass++;
	}

	pr_info("%s[%d] completed %lu passes, %lu misses\n",
		__func__, t->id, pass, miss);
	return err;
}
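
/* Run the signal/add_callback race twice: signaling before and after the attach. */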
static int race_signal_callback(void *arg)
{
	struct dma_fence __rcu *f[2] = {};
	int ret = 0;
	int pass;

	for (pass = 0; !ret && pass <= 1; pass++) {
		struct race_thread t[2];
		int i;

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			t[i].fences = f;
			t[i].id = i;
			t[i].before = pass;
			t[i].task = kthread_run(thread_signal_callback, &t[i],
						"dma-fence:%d", i);
			get_task_struct(t[i].task);
		}

		/* Let the threads race for a while before stopping them. */
		msleep(50);

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			int err;

			err = kthread_stop(t[i].task);
			if (err && !ret)
				ret = err;

			put_task_struct(t[i].task);
		}
	}

	return ret;
}
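
/*
 * Entry point for the dma-buf selftest harness (see selftest.h): set up the
 * mock fence slab, run every subtest and tear the slab down again.
 */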
int dma_fence(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_signaling),
		SUBTEST(test_add_callback),
		SUBTEST(test_late_add_callback),
		SUBTEST(test_rm_callback),
		SUBTEST(test_late_rm_callback),
		SUBTEST(test_status),
		SUBTEST(test_error),
		SUBTEST(test_wait),
		SUBTEST(test_wait_timeout),
		SUBTEST(test_stub),
		SUBTEST(race_signal_callback),
	};
	int ret;

	pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

	slab_fences = KMEM_CACHE(mock_fence,
				 SLAB_TYPESAFE_BY_RCU |
				 SLAB_HWCACHE_ALIGN);
	if (!slab_fences)
		return -ENOMEM;

	ret = subtests(tests, NULL);

	kmem_cache_destroy(slab_fences);

	return ret;
}