/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"

#include "mock_context.h"

struct spinner {
	struct drm_i915_private *i915;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	u32 *batch;
	void *seqno;
};
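
/*
 * Allocate and map the two backing pages for the spinner: one page for the
 * looping batch buffer and one used as a pseudo hardware status page (HWS)
 * into which the batch writes its seqno so the CPU can observe it running.
 */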
static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}
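
/*
 * Build a batch that writes the request's seqno into its context's slot in
 * the HWS page and then branches back to its own start, spinning on the GPU
 * until spinner_end() overwrites the loop with MI_BATCH_BUFFER_END.
 */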
static int emit_recurse_batch(struct spinner *spin,
			      struct i915_request *rq,
			      u32 arbitration_command)
{
	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return PTR_ERR(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto unpin_hws;

	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	err = i915_vma_move_to_active(hws, rq, 0);
	if (err)
		goto unpin_hws;

	if (!i915_gem_object_has_active_reference(hws->obj)) {
		i915_gem_object_get(hws->obj);
		i915_gem_object_set_active_reference(hws->obj);
	}

	batch = spin->batch;

	/* Report the request's seqno via the HWS page. */
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	/* Branch back to the start of the batch and spin. */
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	i915_gem_chipset_flush(spin->i915);

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err;
}
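
/* Allocate a request on the engine for the context and attach the spinning batch. */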
static struct i915_request *
spinner_create_request(struct spinner *spin,
		       struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       u32 arbitration_command)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(spin, rq, arbitration_command);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}
static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
static void spinner_end(struct spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}
static void spinner_fini(struct spinner *spin)
{
	spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
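
/*
 * Wait (briefly) for the request to be submitted and for the batch to report
 * its seqno via the HWS, i.e. for the spinner to actually start executing on
 * the GPU.
 */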
static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
{
	if (!wait_event_timeout(rq->execute,
				READ_ONCE(rq->global_seqno),
				msecs_to_jiffies(10)))
		return false;

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}
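
/*
 * live_sanitycheck: start and stop a spinner on every engine to confirm that
 * execlists submission is working at all before exercising preemption.
 */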
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct spinner spin;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
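
/*
 * live_preempt: block each engine with a low priority spinner, then submit a
 * high priority spinner and check that it starts running while the low
 * priority batch is still spinning, i.e. that preemption occurs.
 */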
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
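
/*
 * live_late_preempt: submit both contexts at default priority, confirm the
 * second does not overtake the first, then raise its priority via
 * engine->schedule() and check that preemption kicks in after the fact.
 */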
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_PRIORITY_MAX;
		engine->schedule(rq, &attr);

		if (!wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	spinner_end(&spin_hi);
	spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}
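
/*
 * live_preempt_hang: inject a hang while a preemption is pending, reset the
 * engine, then check that the high priority spinner still gets to run once
 * the reset completes.
 */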
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	if (spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
					    MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		spinner_end(&spin_hi);
		spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	spinner_fini(&spin_lo);
err_spin_hi:
	spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
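
/* Entry point: run the execlists live selftests if the hardware supports them. */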
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_preempt_hang),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}