/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIMEDOUT;
	i915_request_put(rq);

	return err;
}

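/*
 * Build a reference copy of the GT, engine and context workaround lists so
 * that verify_wa_lists() can re-check them against the HW after a reset.
 */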
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt->i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

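/*
 * Submit a request that uses MI_STORE_REGISTER_MEM to copy every
 * RING_FORCE_TO_NONPRIV slot of the engine into a scratch page, so that
 * the whitelist programming can be inspected from the CPU.
 */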
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

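/*
 * Compare each RING_FORCE_TO_NONPRIV slot against its expected value:
 * the whitelisted register for slots in use, RING_NOPID for the rest.
 */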
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(engine->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	err = request_add_spin(rq, spin);
err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

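/*
 * Model the effect of a register write for comparison with the readback:
 * if the discovered reserved mask is 0x0000ffff the register is a masked
 * register, where the upper 16 bits of the write select which of the lower
 * 16 bits are updated; otherwise the write simply replaces the old value.
 */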
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old = new;
	}

	return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

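/*
 * For every whitelisted register, build an unprivileged batch that saves the
 * original value, writes a series of bit patterns with MI_LOAD_REGISTER_IMM,
 * stores each readback with MI_STORE_REGISTER_MEM, and finally restores the
 * original value. The readbacks are then compared against reg_write() to
 * verify that user writes actually land (modulo any write mask).
 */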
static int check_dirty_whitelist(struct intel_context *ce)
{
	/*
	 * Assorted bit patterns to write; the last entry must be 0xffffffff
	 * so that the write mask can be detected (see the GEM_BUG_ON below).
	 */
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(engine->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);

err_request:
		err = request_add_sync(rq, err);
		if (err) {
			pr_err("%s: Futzing %x timed out; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(engine->gt);
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		} else {
			rsvd = 0;
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(engine->i915))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)
			continue;

		if (intel_has_reset_engine(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_engine_reset,
							   "engine");
			if (err)
				goto out;
		}

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,
							   do_device_reset,
							   "device");
			if (err)
				goto out;
		}
	}

out:
	igt_global_reset_unlock(gt);
	return err;
}

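/*
 * Helpers for live_isolated_whitelist: read_whitelisted_registers() copies
 * the current whitelisted register values into a per-context scratch buffer,
 * and scrub_whitelisted_registers() overwrites the writable ones from an
 * unprivileged batch.
 */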
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	u32 srm, *cs;
	int i, err = 0;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	return request_add_sync(rq, err);
}

static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	vm = i915_gem_context_get_vm_rcu(ctx);
	batch = create_batch(vm);
	i915_vm_put(vm);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	err = request_add_sync(rq, err);

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

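/*
 * Compare two captured whitelist snapshots slot by slot, skipping read-only
 * entries, using the supplied predicate (result_eq or result_neq).
 */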
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b, i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_address_space *vm;
		struct i915_gem_context *c;

		c = kernel_context(gt->i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		vm = i915_gem_context_get_vm_rcu(c);

		client[i].scratch[0] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			i915_vm_put(vm);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
		i915_vm_put(vm);
	}

	for_each_engine(engine, gt, id) {
		if (!engine->kernel_context->vm)
			continue;

		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

	return err;
}

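/*
 * Re-verify the reference GT, engine and context workaround lists against
 * the HW, e.g. before and after a reset.
 */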
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;
}

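/*
 * As above, but per engine: verify the workaround lists survive both an
 * idle engine reset and a reset while the engine is busy with a spinner.
 */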
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	ctx = kernel_context(gt->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		ret = request_add_spin(rq, &spin);
		if (ret) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(gt, &lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);
	kernel_context_close(ctx);

	igt_flush_test(gt->i915);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}