drivers/gpu/drm/i915/selftests/i915_gem_request.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"
static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_add_request(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_add_request(request);

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_gem_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_add_request(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
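
/* Queue a slow request, cancel it before it executes, submit a zero-delay
 * "vip" request from a second context and then resubmit the original behind
 * it. The vip request should complete first, while the low priority request
 * should still be outstanding.
 */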
static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_gem_request_get(request);
	i915_add_request(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_add_request(vip);
		goto err_context_1;
	}
	i915_gem_request_get(vip);
	i915_add_request(vip);
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_wait_request(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_gem_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_gem_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
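
/* Entry point for the mock selftests above: they run against a mock GEM
 * device created by mock_gem_device(), so no real hardware is touched.
 */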
int i915_gem_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_unref(&i915->drm);

	return err;
}
struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};
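
/* begin_live_test()/end_live_test() bracket each live subtest: the GPU is
 * flushed to idle first, and afterwards we check that the engines idled
 * again and that no resets or missed interrupts occurred during the test.
 */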
static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}
static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_gem_retire_requests(i915);

	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}
static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_gem_request_alloc(engine,
								 i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_add_request(request);
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
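
/* Build a one page batch in the global GTT containing only
 * MI_BATCH_BUFFER_END, i.e. a batch that completes immediately.
 */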
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
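
/* Allocate a request on the kernel context and point it at the empty batch;
 * the request is always submitted via __i915_add_request(), even on error.
 */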
static struct drm_i915_gem_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct drm_i915_gem_request *request;
	int err;

	request = i915_gem_request_alloc(engine,
					 engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	__i915_add_request(request, err == 0);
	return err ? ERR_PTR(err) : request;
}
static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_wait_request(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
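
/* Build a self-referencing batch: its first instruction is an
 * MI_BATCH_BUFFER_START that jumps back to the start of the batch, so it
 * spins on the GPU until recursive_batch_resolve() rewrites that first
 * dword to MI_BATCH_BUFFER_END.
 */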
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else if (gen >= 4) {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
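
/* Terminate a spinning recursive batch by overwriting its first dword with
 * MI_BATCH_BUFFER_END and flushing the write to the GPU.
 */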
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}
static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES];
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
		i915_gem_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_gem_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
	struct drm_i915_gem_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_gem_request_await_dma_fence(request[id],
							       &prev->fence);
			if (err) {
				i915_add_request(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_gem_request_put(request[id]);
	}
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
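
/* Entry point for the live selftests: these exercise request submission on
 * the real engines of the device passed in.
 */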
int i915_gem_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

	return i915_subtests(tests, i915);
}