Linux 4.19.133
[linux/fpc-iii.git] / drivers/gpu/drm/i915/selftests/intel_lrc.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"

#include "mock_context.h"
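
/*
 * A spinner submits a self-referencing batch buffer: it writes its seqno to
 * a scratch HWS page and then loops back on itself, keeping the engine busy
 * until spinner_end() rewrites the first dword to MI_BATCH_BUFFER_END.
 */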
struct spinner {
	struct drm_i915_private *i915;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	u32 *batch;
	void *seqno;
};
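
/* Allocate and map the scratch HWS page and the batch object for a spinner. */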
static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}
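
/*
 * Build the spinning batch: store the request's seqno into the HWS page,
 * emit the caller's arbitration command (MI_ARB_CHECK or MI_NOOP), then
 * jump back to the start of the batch so it spins until terminated.
 */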
static int emit_recurse_batch(struct spinner *spin,
			      struct i915_request *rq,
			      u32 arbitration_command)
{
	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return PTR_ERR(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto unpin_hws;

	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	err = i915_vma_move_to_active(hws, rq, 0);
	if (err)
		goto unpin_hws;

	if (!i915_gem_object_has_active_reference(hws->obj)) {
		i915_gem_object_get(hws->obj);
		i915_gem_object_set_active_reference(hws->obj);
	}

	batch = spin->batch;

	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	i915_gem_chipset_flush(spin->i915);

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err;
}
static struct i915_request *
spinner_create_request(struct spinner *spin,
		       struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       u32 arbitration_command)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(spin, rq, arbitration_command);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}
static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
static void spinner_end(struct spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}

static void spinner_fini(struct spinner *spin)
{
	spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
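
/*
 * Wait for the request to reach the hardware and then for its batch to
 * report the seqno via the HWS page, i.e. for the spinner to be running.
 */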
static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
{
	if (!wait_event_timeout(rq->execute,
				READ_ONCE(rq->global_seqno),
				msecs_to_jiffies(10)))
		return false;

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}
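
/*
 * Sanity check: on each engine, submit a spinner, confirm it starts, then
 * terminate it and flush, wedging the device on any failure.
 */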
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct spinner spin;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
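
/*
 * Preemption test: start a low priority spinner on each engine, then submit
 * a high priority spinner and expect it to preempt and start running while
 * the low priority batch is still spinning.
 */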
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
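
/*
 * Late preemption: submit both spinners at default priority, check that the
 * second does not overtake the first, then raise its priority via
 * engine->schedule() and expect the priority bump to trigger preemption.
 */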
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_PRIORITY_MAX;
		engine->schedule(rq, &attr);

		if (!wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	spinner_end(&spin_hi);
	spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}
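
/*
 * Preempt-to-hang: inject a hang at the preemption point, wait for it to be
 * reported, reset the engine, and then check that the high priority spinner
 * still gets to run afterwards.
 */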
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
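
/* Entry point: run the execlists live selftests on capable, unwedged hardware. */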
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_preempt_hang),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}