/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "selftest.h"

static struct kmem_cache *slab_fences;

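/*
 * The mock fences are allocated from a SLAB_TYPESAFE_BY_RCU cache (set up
 * in dma_fence() at the bottom of this file), so that the RCU lookup via
 * dma_fence_get_rcu_safe() in the race tests below is exercised against
 * fence memory that may be freed and reused under a concurrent reader.
 */
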
static struct mock_fence {
	struct dma_fence base;
	spinlock_t lock;
} *to_mock_fence(struct dma_fence *f)
{
	return container_of(f, struct mock_fence, base);
}

static const char *mock_name(struct dma_fence *f)
{
	return "mock";
}

static void mock_fence_release(struct dma_fence *f)
{
	kmem_cache_free(slab_fences, to_mock_fence(f));
}

struct wait_cb {
	struct dma_fence_cb cb;
	struct task_struct *task;
};

static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
}

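/*
 * Minimal .wait implementation: attach mock_wakeup() as a fence callback,
 * then sleep until the fence signals, a signal is pending, or the timeout
 * elapses, in the same shape a driver-provided dma_fence_ops.wait takes.
 */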
static long mock_wait(struct dma_fence *f, bool intr, long timeout)
{
	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct wait_cb cb = { .task = current };

	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
		return timeout;

	while (timeout) {
		set_current_state(state);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (signal_pending_state(state, current))
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	if (!dma_fence_remove_callback(f, &cb.cb))
		return timeout;

	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	return -ETIME;
}

static const struct dma_fence_ops mock_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.wait = mock_wait,
	.release = mock_fence_release,
};

static struct dma_fence *mock_fence(void)
{
	struct mock_fence *f;

	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
	if (!f)
		return NULL;

	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

	return &f->base;
}

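/*
 * sanitycheck: allocate, signal and release a single fence, verifying
 * that the mock machinery itself works before testing the dma-fence API.
 */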
static int sanitycheck(void *arg)
{
	struct dma_fence *f;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	dma_fence_signal(f);
	dma_fence_put(f);

	return 0;
}

static int test_signaling(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	if (dma_fence_is_signaled(f)) {
		pr_err("Fence unexpectedly signaled on creation\n");
		goto err_free;
	}

	if (dma_fence_signal(f)) {
		pr_err("Fence reported being already signaled\n");
		goto err_free;
	}

	if (!dma_fence_is_signaled(f)) {
		pr_err("Fence not reporting signaled\n");
		goto err_free;
	}

	if (!dma_fence_signal(f)) {
		pr_err("Fence reported not being already signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

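/*
 * simple_cb records whether its callback has run; smp_store_mb() in
 * simple_callback() makes the update visible to the READ_ONCE() check
 * in thread_signal_callback() below.
 */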
struct simple_cb {
	struct dma_fence_cb cb;
	bool seen;
};

static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}

static int test_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

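/*
 * Attaching a callback to an already signaled fence must fail, and the
 * rejected callback must never be invoked.
 */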
static int test_late_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	dma_fence_signal(f);

	if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Added callback, but fence was already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback called after failed attachment!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	if (!dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Failed to remove callback!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback still signaled after removal!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

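/*
 * Removing a callback that has already been executed must report
 * failure, since there is nothing left to detach.
 */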
static int test_late_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	if (dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Callback removal succeeded after being executed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_status(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has signaled status on creation\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!dma_fence_get_status(f)) {
		pr_err("Fence not reporting signaled status\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

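/*
 * An error set with dma_fence_set_error() must not leak through
 * dma_fence_get_status() until the fence is actually signaled.
 */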
static int test_error(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	dma_fence_set_error(f, -EIO);

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has error status before signal\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (dma_fence_get_status(f) != -EIO) {
		pr_err("Fence not reporting error status, got %d\n",
		       dma_fence_get_status(f));
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_wait(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	dma_fence_signal(f);

	if (dma_fence_wait_timeout(f, false, 0) != 0) {
		pr_err("Wait reported incomplete after being signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

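/*
 * test_wait_timeout arms an on-stack timer to signal the fence
 * asynchronously, checking that a 1-jiffy wait times out while a
 * 2-jiffy wait observes the signal.
 */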
struct wait_timer {
	struct timer_list timer;
	struct dma_fence *f;
};

static void wait_timer(struct timer_list *timer)
{
	struct wait_timer *wt = from_timer(wt, timer, timer);

	dma_fence_signal(wt->f);
}

static int test_wait_timeout(void *arg)
{
	struct wait_timer wt;
	int err = -EINVAL;

	timer_setup_on_stack(&wt.timer, wait_timer, 0);

	wt.f = mock_fence();
	if (!wt.f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(wt.f);

	if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	mod_timer(&wt.timer, jiffies + 1);

	if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
		if (timer_pending(&wt.timer)) {
			pr_notice("Timer did not fire within the jiffy!\n");
			err = 0; /* not our fault! */
		} else {
			pr_err("Wait reported incomplete after timeout\n");
		}
		goto err_free;
	}

	err = 0;
err_free:
	del_timer_sync(&wt.timer);
	destroy_timer_on_stack(&wt.timer);
	dma_fence_signal(wt.f);
	dma_fence_put(wt.f);
	return err;
}

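/*
 * Stub fences from dma_fence_get_stub() are born signaled; obtaining
 * one that reports otherwise is a bug.
 */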
static int test_stub(void *arg)
{
	struct dma_fence *f[64];
	int err = -EINVAL;
	int i;

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		f[i] = dma_fence_get_stub();
		if (!dma_fence_is_signaled(f[i])) {
			pr_err("Obtained unsignaled stub fence!\n");
			goto err;
		}
	}

	err = 0;
err:
	while (i--)
		dma_fence_put(f[i]);
	return err;
}

/* Now off to the races! */

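/*
 * Two threads race in lockstep: each publishes its own fence through an
 * RCU-protected slot, grabs its partner's fence with
 * dma_fence_get_rcu_safe(), attaches a callback and signals. One pass
 * signals before adding the callback and the other after, probing the
 * ordering between dma_fence_signal() and dma_fence_add_callback().
 */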
struct race_thread {
	struct dma_fence __rcu **fences;
	struct task_struct *task;
	bool before;
	int id;
};

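/*
 * The fence lock is held while the signaler runs the callback list, so
 * a bare lock/unlock pair is enough to wait for in-flight callbacks.
 */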
static void __wait_for_callbacks(struct dma_fence *f)
{
	spin_lock_irq(f->lock);
	spin_unlock_irq(f->lock);
}

static int thread_signal_callback(void *arg)
{
	const struct race_thread *t = arg;
	unsigned long pass = 0;
	unsigned long miss = 0;
	int err = 0;

	while (!err && !kthread_should_stop()) {
		struct dma_fence *f1, *f2;
		struct simple_cb cb;

		f1 = mock_fence();
		if (!f1) {
			err = -ENOMEM;
			break;
		}

		dma_fence_enable_sw_signaling(f1);

		rcu_assign_pointer(t->fences[t->id], f1);
		smp_wmb();

		/* Wait until the partner thread has published its fence. */
		rcu_read_lock();
		do {
			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
		} while (!f2 && !kthread_should_stop());
		rcu_read_unlock();

		if (t->before)
			dma_fence_signal(f1);

		smp_store_mb(cb.seen, false);
		if (!f2 ||
		    dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
			miss++;
			cb.seen = true;
		}

		if (!t->before)
			dma_fence_signal(f1);

		if (!cb.seen) {
			dma_fence_wait(f2, false);
			__wait_for_callbacks(f2);
		}

		if (!READ_ONCE(cb.seen)) {
			pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
			       t->id, pass, miss,
			       t->before ? "before" : "after",
			       dma_fence_is_signaled(f2) ? "yes" : "no");
			err = -EINVAL;
		}

		dma_fence_put(f2);

		rcu_assign_pointer(t->fences[t->id], NULL);
		smp_wmb();

		dma_fence_put(f1);

		pass++;
	}

	pr_info("%s[%d] completed %lu passes, %lu misses\n",
		__func__, t->id, pass, miss);
	return err;
}

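/*
 * Run the race twice: pass 0 signals after adding the callback
 * (t->before == false), pass 1 signals before.
 */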
static int race_signal_callback(void *arg)
{
	struct dma_fence __rcu *f[2] = {};
	int ret = 0;
	int pass;

	for (pass = 0; !ret && pass <= 1; pass++) {
		struct race_thread t[2];
		int i;

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			t[i].fences = f;
			t[i].id = i;
			t[i].before = pass;
			t[i].task = kthread_run(thread_signal_callback, &t[i],
						"dma-fence:%d", i);
			if (IS_ERR(t[i].task)) {
				ret = PTR_ERR(t[i].task);
				while (--i >= 0)
					kthread_stop_put(t[i].task);
				return ret;
			}
			get_task_struct(t[i].task);
		}

		msleep(50);

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			int err;

			err = kthread_stop_put(t[i].task);
			if (err && !ret)
				ret = err;
		}
	}

	return ret;
}

int dma_fence(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_signaling),
		SUBTEST(test_add_callback),
		SUBTEST(test_late_add_callback),
		SUBTEST(test_rm_callback),
		SUBTEST(test_late_rm_callback),
		SUBTEST(test_status),
		SUBTEST(test_error),
		SUBTEST(test_wait),
		SUBTEST(test_wait_timeout),
		SUBTEST(test_stub),
		SUBTEST(race_signal_callback),
	};
	int ret;

	pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

	slab_fences = KMEM_CACHE(mock_fence,
				 SLAB_TYPESAFE_BY_RCU |
				 SLAB_HWCACHE_ALIGN);
	if (!slab_fences)
		return -ENOMEM;

	ret = subtests(tests, NULL);

	kmem_cache_destroy(slab_fences);

	return ret;
}