/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
static int __i915_sw_fence_call
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	switch (state) {
	case FENCE_COMPLETE:
		break;
	case FENCE_FREE:
		/* Leave the fence for the caller to free it after testing */
		break;
	}
	return NOTIFY_DONE;
}
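/*
 * Neither notification needs any work here: the selftests below inspect and
 * free every fence explicitly, so fence_notify() only has to acknowledge
 * FENCE_COMPLETE and FENCE_FREE and return NOTIFY_DONE.
 */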
static struct i915_sw_fence *alloc_fence(void)
{
	struct i915_sw_fence *fence;

	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	i915_sw_fence_init(fence, fence_notify);
	return fence;
}
static void free_fence(struct i915_sw_fence *fence)
{
	i915_sw_fence_fini(fence);
	kfree(fence);
}
static int __test_self(struct i915_sw_fence *fence)
{
	if (i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_commit(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_wait(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	return 0;
}
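/*
 * __test_self() captures the basic lifecycle that the remaining tests build
 * on: a freshly initialised fence is not done, committing it with nothing
 * left to wait on signals it, and i915_sw_fence_wait() then returns
 * immediately.
 */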
static int test_self(void *arg)
{
	struct i915_sw_fence *fence;
	int ret;

	/* Test i915_sw_fence signaling and completion testing */
	fence = alloc_fence();
	if (!fence)
		return -ENOMEM;

	ret = __test_self(fence);

	free_fence(fence);
	return ret;
}
static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test detection of cycles within the i915_sw_fence graphs */
	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
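/*
 * The -EINVAL checks above rely on CONFIG_DRM_I915_SW_FENCE_CHECK_DAG:
 * with that option enabled, i915_sw_fence_await_sw_fence_gfp() is expected
 * to refuse any await that would turn the fence graph into a cycle, while
 * still accepting the legitimate acyclic awaits (B on C, A on C).
 */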
static int test_AB(void *arg)
{
	struct i915_sw_fence *A, *B;
	int ret;

	/* Test i915_sw_fence (A) waiting on an event source (B) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_B;
	if (ret == 0) {
		pr_err("Incorrectly reported fence A was complete before await\n");
		ret = -EINVAL;
		goto err_B;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A))
		goto err_B;

	i915_sw_fence_commit(B);
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B is not done\n");
		goto err_B;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A is not done\n");
		goto err_B;
	}

	ret = 0;
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
static int test_ABC(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test a chain of fences, A waits on B who waits on C */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_C;
	}

	i915_sw_fence_commit(B);
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		goto err_C;
	}

	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early (after signaling B)\n");
		goto err_C;
	}

	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
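/*
 * Committing A and B above only arms them: neither may signal while the
 * fence it awaits is still pending, so completion has to propagate strictly
 * from C to B to A.
 */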
static int test_AB_C(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test multiple fences (AB) waiting on a single event (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	ret = 0;
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		ret = -EINVAL;
	}
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(C);
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test multiple event sources (A,B) for a single fence (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C))
		goto err_C;

	ret = 0;
	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}
static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	/* Test a long chain of fences */
	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			nfences = i;
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				nfences = i + 1;
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}
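/*
 * Every fence in the chain awaits its predecessor, so nothing may signal
 * until fences[0] is committed; that single commit then has to ripple
 * through all nfences fences in order.
 */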
struct task_ipc {
	struct work_struct work;
	struct completion started;
	struct i915_sw_fence *in, *out;
	int value;
};
static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	complete(&ipc->started);

	i915_sw_fence_wait(ipc->in);
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}
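/*
 * struct task_ipc and task_ipc() form a small producer/consumer handshake:
 * the worker announces that it is running via the completion, blocks on
 * ipc->in, publishes its result with smp_store_mb() and only then commits
 * ipc->out for the test to wait on.
 */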
static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	int ret = 0;

	/* Test use of i915_sw_fence as an interprocess signaling mechanism */
	ipc.value = 0;
	ipc.in = alloc_fence();
	if (!ipc.in)
		return -ENOMEM;
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* use a completion to avoid chicken-and-egg testing */
	init_completion(&ipc.started);

	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	schedule_work(&ipc.work);

	wait_for_completion(&ipc.started);

	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
	return ret;
}
static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}

		i915_sw_fence_wait(&tf.fence);
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}

		timed_fence_fini(&tf);
	}

	return 0;

err:
	timed_fence_fini(&tf);
	return -EINVAL;
}
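/*
 * test_timer() relies on the timed_fence selftest helper: timed_fence_init()
 * is expected to arm a timer that signals tf.fence once the target jiffy has
 * passed, so a fence armed with target == jiffies must already be signaled
 * and later targets must never signal early.
 */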
static const char *mock_name(struct dma_fence *fence)
{
	return "mock";
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

static DEFINE_SPINLOCK(mock_fence_lock);
static struct dma_fence *alloc_dma_fence(void)
{
	struct dma_fence *dma;

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (dma)
		dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);

	return dma;
}
static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}
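/*
 * wrap_dma_fence() hands back a committed i915_sw_fence that awaits the
 * given dma_fence: with delay == 0 it should only signal once the dma_fence
 * is signaled, while a non-zero delay also arms a timeout after which the
 * i915_sw_fence is expected to signal on its own.
 */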
static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* We round the timeout for the fence up to the next second */
	end = round_jiffies_up(jiffies + delay);

	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		pr_debug("Slept too long, delay=%lu, (target=%lu, now=%lu) skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	if (i915_sw_fence_done(not)) {
		pr_err("No timeout fence signaled!\n");
		goto err;
	}

skip:
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences unsignaled\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);
	return 0;

err:
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}
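/*
 * test_dma_fence() is timing sensitive: if the CPU oversleeps past the
 * rounded-up timeout it can no longer distinguish a real failure from
 * scheduler noise, so it only emits a pr_debug and skips ahead to signaling
 * the dma_fence.
 */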
int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};
	return i915_subtests(tests, NULL);
}