Linux 4.19.133
[linux/fpc-iii.git] / drivers/gpu/drm/i915/selftests/i915_request.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_request_add(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_request_add(request);

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

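/*
 * igt_request_rewind: queue a slow request on context "A", cancel it before
 * it executes, submit an unthrottled "vip" request on context "B", then
 * resubmit the original request. The vip request is expected to complete
 * first, while the low-priority request must still be outstanding.
 */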
static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_request_get(request);
	i915_request_add(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_request_add(vip);
		goto err_context_1;
	}
	i915_request_get(vip);
	i915_request_add(vip);
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

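/*
 * The mock selftests run against a mock GEM device, with no real hardware
 * involved, exercising just the request and fence machinery above.
 */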
int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_put(&i915->drm);

	return err;
}

struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

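/*
 * begin_live_test()/end_live_test() bracket each live subtest: we idle the
 * GPU and record the current reset count and missed-irq mask beforehand,
 * then afterwards verify that the engines idle again and that no GPU reset
 * or missed interrupt occurred in between.
 */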
static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_retire_requests(i915);

	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		struct i915_request *request = NULL;
		unsigned long n, prime;
		IGT_TIMEOUT(end_time);
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_request_alloc(engine,
							     i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(request);
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

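/*
 * empty_batch() builds a single-page batch buffer containing only
 * MI_BATCH_BUFFER_END, pinned into the global GTT so it can be reused for
 * every engine.
 */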
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

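/* Allocate and submit a kernel-context request that runs the no-op batch. */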
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct i915_request *request;
	int err;

	request = i915_request_alloc(engine, engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	i915_request_add(request);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_request_wait(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

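/*
 * recursive_batch() writes a batch whose first instruction is an
 * MI_BATCH_BUFFER_START pointing back at itself, so once started it spins
 * on the GPU until the loop is broken. A trailing MI_BATCH_BUFFER_END is
 * also emitted, to terminate early in case of error.
 */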
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

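/*
 * Break the loop written by recursive_batch() by overwriting its first
 * dword with MI_BATCH_BUFFER_END, allowing the spinning batch to complete.
 */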
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_request *request[I915_NUM_ENGINES];
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_request_get(request[id]);
		i915_request_add(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
		i915_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_request_await_dma_fence(request[id],
							   &prev->fence);
			if (err) {
				i915_request_add(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_request_get(request[id]);
		i915_request_add(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_request_put(request[id]);
	}
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

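/*
 * The live selftests run against real hardware and are skipped entirely if
 * the GPU is already terminally wedged.
 */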
int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}