/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"
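
/*
 * The mock selftests below exercise the request and fence lifecycle
 * (allocation, submission, waiting, completion and reordering) against a
 * mock GEM device, without touching real hardware.
 */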

static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_add_request(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_add_request(request);

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_gem_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_add_request(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_gem_request_get(request);
	i915_add_request(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_add_request(vip);
		goto err_context_1;
	}
	i915_gem_request_get(vip);
	i915_add_request(vip);
	request->engine->submit_request(request);

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_wait_request(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_gem_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_gem_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_gem_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_unref(&i915->drm);

	return err;
}

struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_gem_retire_requests(i915);

	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_gem_request_alloc(engine,
								 i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * of latency.
				 */

				i915_add_request(request);
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
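
/*
 * empty_batch() builds a single-page batch buffer containing only
 * MI_BATCH_BUFFER_END, pinned into the global GTT, so that each request
 * executes the smallest possible amount of work.
 */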

static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
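
/*
 * empty_request() submits the no-op batch above on the given engine and
 * returns the request so the caller can wait for it to complete.
 */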

static struct drm_i915_gem_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct drm_i915_gem_request *request;
	int err;

	request = i915_gem_request_alloc(engine,
					 engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);

	__i915_add_request(request, err == 0);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_wait_request(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
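
/*
 * recursive_batch() builds a batch buffer whose first instruction is an
 * MI_BATCH_BUFFER_START pointing back at itself, so the batch spins on the
 * GPU indefinitely until recursive_batch_resolve() rewrites it.
 */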

static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else if (gen >= 4) {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
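
/*
 * Terminate a spinning recursive batch by overwriting its first dword with
 * MI_BATCH_BUFFER_END and flushing the write to the GPU.
 */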

static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES];
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
		i915_gem_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_gem_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
	struct drm_i915_gem_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_gem_request_await_dma_fence(request[id],
							       &prev->fence);
			if (err) {
				i915_add_request(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_gem_request_put(request[id]);
	}
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_gem_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

	return i915_subtests(tests, i915);
}