/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

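/*
 * Live selftests for the i915 workaround infrastructure: check that the
 * RING_FORCE_TO_NONPRIV whitelist and the GT/engine/context workaround
 * lists are applied, survive engine and device resets, and that whitelisted
 * register writes remain isolated between contexts.
 */
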
static const struct wo_register {
        enum intel_platform platform;
        u32 reg;
} wo_registers[] = {
        { INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
};

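/*
 * Request helpers: queue the request and either wait for it to complete
 * (request_add_sync) or for its spinner payload to start executing
 * (request_add_spin), converting a timeout into an error code.
 */
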
static int request_add_sync(struct i915_request *rq, int err)
{
        i915_request_get(rq);
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;
        i915_request_put(rq);

        return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
        int err = 0;

        i915_request_get(rq);
        i915_request_add(rq);
        if (spin && !igt_wait_for_spinner(spin, rq))
                err = -ETIMEDOUT;
        i915_request_put(rq);

        return err;
}

static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
        gt_init_workarounds(gt->i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, gt, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;

                wa_init_start(wal, "REF", engine->name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);

                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           "CTX_REF");
        }
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

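/*
 * read_nonprivs() builds a request that stores (SRM) the contents of every
 * RING_FORCE_TO_NONPRIV slot of @engine into a freshly allocated scratch
 * object, so the whitelist currently programmed in hardware can be compared
 * against the expected register list.
 */
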
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_flush_map(result);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

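/*
 * check_whitelist() reads back all RING_NONPRIV slots via read_nonprivs()
 * and verifies that each slot holds the register expected from
 * engine->whitelist (or RING_NOPID for unused slots).
 */
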
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct intel_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        i915_gem_object_lock(results, NULL);
        intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        i915_gem_object_unlock(results);
        if (intel_gt_is_wedged(engine->gt))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
        intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return intel_engine_reset(engine, "live_workarounds");
}

static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct intel_context *ce;
        struct i915_request *rq;
        int err = 0;

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        rq = igt_spinner_create_request(spin, ce, MI_NOOP);
        intel_context_put(ce);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        err = request_add_spin(rq, spin);
err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_gem_context *ctx, *tmp;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, engine->name, name);

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = igt_spinner_init(&spin, engine->gt);
        if (err)
                goto out_ctx;

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out_spin;
        }

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out_spin;

        with_intel_runtime_pm(engine->uncore->rpm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out_spin;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out_spin;
        }

        tmp = kernel_context(i915);
        if (IS_ERR(tmp)) {
                err = PTR_ERR(tmp);
                goto out_spin;
        }
        kernel_context_close(ctx);
        ctx = tmp;

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out_spin;
        }

out_spin:
        igt_spinner_fini(&spin);
out_ctx:
        kernel_context_close(ctx);
        return err;
}

static struct i915_vma *create_batch(struct i915_address_space *vm)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int err;

        obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

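/*
 * reg_write() models how a value lands in a register: ordinary registers
 * simply take the new value, while registers with an 0x0000ffff reserved
 * mask treat the upper 16 bits as per-bit write enables for the lower 16.
 */
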
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {
                old = new;
        }

        return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
        enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
        int i;

        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_WR)
                return true;

        for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
                if (wo_registers[i].platform == platform &&
                    wo_registers[i].reg == reg)
                        return true;
        }

        return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
{
        reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
        switch (reg) {
        case 0x358: /* ring and context timestamp registers */
        case 0x35c:
        case 0x3a8:
                return true;

        default:
                return false;
        }
}

static bool ro_register(u32 reg)
{
        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
             RING_FORCE_TO_NONPRIV_ACCESS_RD)
                return true;

        return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}

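/*
 * check_dirty_whitelist() submits an unprivileged batch that hammers each
 * whitelisted register with LRI writes of assorted bit patterns, stores the
 * readback after every write into scratch, and then compares the results
 * against the reg_write() model, reporting registers that turn out to be
 * unwritable or only partially writable.
 */
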
static int check_dirty_whitelist(struct intel_context *ce)
{
        const u32 values[] = {
                0x00000000,
                0x01010101,
                0x10100101,
                0x03030303,
                0x30300303,
                0x05050505,
                0x50500505,
                0x0f0f0f0f,
                0xf00ff00f,
                0x10101010,
                0xf0f01010,
                0x30303030,
                0xa0a03030,
                0x50505050,
                0xc0c05050,
                0xf0f0f0f0,
                0x11111111,
                0x33333333,
                0x55555555,
                0x0000ffff,
                0x00ff00ff,
                0xff0000ff,
                0xffff00ff,
                0xffffffff,
        };
        struct intel_engine_cs *engine = ce->engine;
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v;
        u32 *cs, *results;

        scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        batch = create_batch(ce->vm);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
        }
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                int idx;
                bool ro_reg;

                if (wo_register(engine, reg))
                        continue;

                if (timestamp(engine, reg))
                        continue; /* timestamps are expected to autoincrement */

                ro_reg = ro_register(reg);

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
                if (INTEL_GEN(engine->i915) >= 8)
                        lrm++, srm++;

                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);

                cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        goto out_batch;
                }

                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = ~values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

                /* LRM original -- don't leave garbage in the context! */
                *cs++ = lrm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                *cs++ = MI_BATCH_BUFFER_END;

                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);
                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_batch;
                }

                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                        err = engine->emit_init_breadcrumb(rq);
                        if (err)
                                goto err_request;
                }

                i915_vma_lock(batch);
                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(batch, rq, 0);
                i915_vma_unlock(batch);
                if (err)
                        goto err_request;

                i915_vma_lock(scratch);
                err = i915_request_await_object(rq, scratch->obj, true);
                if (err == 0)
                        err = i915_vma_move_to_active(scratch, rq,
                                                      EXEC_OBJECT_WRITE);
                i915_vma_unlock(scratch);
                if (err)
                        goto err_request;

                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;

err_request:
                err = request_add_sync(rq, err);
                if (err) {
                        pr_err("%s: Futzing %x timedout; cancelling test\n",
                               engine->name, reg);
                        intel_gt_set_wedged(engine->gt);
                        goto out_batch;
                }
                results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
                if (IS_ERR(results)) {
                        err = PTR_ERR(results);
                        goto out_batch;
                }

                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
                if (!ro_reg) {
                        /* detect write masking */
                        rsvd = results[ARRAY_SIZE(values)];
                        if (!rsvd) {
                                pr_err("%s: Unable to write to whitelisted register %x\n",
                                       engine->name, reg);
                                err = -EINVAL;
                                goto out_unpin;
                        }
                } else {
                        rsvd = 0;
                }

                expect = results[0];
                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        if (ro_reg)
                                expect = results[0];
                        else
                                expect = reg_write(expect, ~values[v], rsvd);

                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                if (err) {
                        pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
                               engine->name, err, reg);

                        if (ro_reg)
                                pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
                                        engine->name, reg, results[0]);
                        else
                                pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
                                        engine->name, reg, results[0], rsvd);

                        expect = results[0];
                        idx = 1;
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = ~values[v];

                                if (ro_reg)
                                        expect = results[0];
                                else
                                        expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }

                        err = -EINVAL;
                }
out_unpin:
                i915_gem_object_unpin_map(scratch->obj);
                if (err)
                        break;
        }

        if (igt_flush_test(engine->i915))
                err = -EIO;
out_batch:
        i915_vma_unpin_and_release(&batch, 0);
out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
        return err;
}

static int live_dirty_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        /* Can the user write to the whitelisted registers? */

        if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;

        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
                int err;

                if (engine->whitelist.count == 0)
                        continue;

                ce = intel_context_create(engine);
                if (IS_ERR(ce))
                        return PTR_ERR(ce);

                err = check_dirty_whitelist(ce);
                intel_context_put(ce);
                if (err)
                        return err;
        }

        return 0;
}

static int live_reset_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */
        igt_global_reset_lock(gt);

        for_each_engine(engine, gt, id) {
                if (engine->whitelist.count == 0)
                        continue;

                if (intel_has_reset_engine(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_engine_reset,
                                                           "engine");
                        if (err)
                                goto out;
                }

                if (intel_has_gpu_reset(gt)) {
                        err = check_whitelist_across_reset(engine,
                                                           do_device_reset,
                                                           "device");
                        if (err)
                                goto out;
                }
        }

out:
        igt_global_reset_unlock(gt);
        return err;
}

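/*
 * The isolation test below works in two halves: read_whitelisted_registers()
 * saves every whitelisted register of a context into one of its scratch
 * buffers, and scrub_whitelisted_registers() then overwrites all writable
 * slots from an unprivileged batch, so the values seen by a second context
 * can be compared against the defaults.
 */
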
static int read_whitelisted_registers(struct i915_gem_context *ctx,
                                      struct intel_engine_cs *engine,
                                      struct i915_vma *results)
{
        struct i915_request *rq;
        int i, err = 0;
        u32 srm, *cs;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        i915_vma_lock(results);
        err = i915_request_await_object(rq, results->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(results);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u64 offset = results->node.start + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(offset);
                *cs++ = upper_32_bits(offset);
        }
        intel_ring_advance(rq, cs);

err_req:
        return request_add_sync(rq, err);
}

static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
                                       struct intel_engine_cs *engine)
{
        struct i915_address_space *vm;
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;

        vm = i915_gem_context_get_vm_rcu(ctx);
        batch = create_batch(vm);
        i915_vm_put(vm);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }

        *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        continue;

                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

                *cs++ = reg;
                *cs++ = 0xffffffff;
        }
        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);
        intel_gt_chipset_flush(engine->gt);

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto err_request;
        }

        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
                err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto err_request;

        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
        err = request_add_sync(rq, err);

err_unpin:
        i915_gem_object_unpin_map(batch->obj);
err_batch:
        i915_vma_unpin_and_release(&batch, 0);
        return err;
}

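/*
 * find_reg() looks a register up in a small per-gen exception table; it is
 * used below to pardon whitelist entries that are known not to be context
 * saved and to skip registers whose writes cannot be read back.
 */
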
struct regmask {
        i915_reg_t reg;
        unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
                     i915_reg_t reg,
                     const struct regmask *tbl,
                     unsigned long count)
{
        u32 offset = i915_mmio_reg_offset(reg);

        while (count--) {
                if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
                    i915_mmio_reg_offset(tbl->reg) == offset)
                        return true;
                tbl++;
        }

        return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Alas, we must pardon some whitelists. Mistakes already made */
        static const struct regmask pardon[] = {
                { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
                { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
                      u32 a, u32 b, i915_reg_t reg)
{
        if (a != b && !pardon_reg(engine->i915, reg)) {
                pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
                       i915_mmio_reg_offset(reg), a, b);
                return false;
        }

        return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Some registers do not seem to behave, and our writes are unreadable */
        static const struct regmask wo[] = {
                { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
                       u32 a, u32 b, i915_reg_t reg)
{
        if (a == b && !writeonly_reg(engine->i915, reg)) {
                pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
                       i915_mmio_reg_offset(reg), a);
                return false;
        }

        return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
                            struct i915_vma *A,
                            struct i915_vma *B,
                            bool (*fn)(struct intel_engine_cs *engine,
                                       u32 a, u32 b,
                                       i915_reg_t reg))
{
        u32 *a, *b;
        int i, err;

        a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
        }

        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
                const struct i915_wa *wa = &engine->whitelist.list[i];

                if (i915_mmio_reg_offset(wa->reg) &
                    RING_FORCE_TO_NONPRIV_ACCESS_RD)
                        continue;

                if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }

        i915_gem_object_unpin_map(B->obj);
err_a:
        i915_gem_object_unpin_map(A->obj);
        return err;
}

static int live_isolated_whitelist(void *arg)
{
        struct intel_gt *gt = arg;
        struct {
                struct i915_gem_context *ctx;
                struct i915_vma *scratch[2];
        } client[2] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, err = 0;

        /*
         * Check that a write into a whitelisted register works, but is
         * invisible to a second context.
         */

        if (!intel_engines_has_context_isolation(gt->i915))
                return 0;

        for (i = 0; i < ARRAY_SIZE(client); i++) {
                struct i915_address_space *vm;
                struct i915_gem_context *c;

                c = kernel_context(gt->i915);
                if (IS_ERR(c)) {
                        err = PTR_ERR(c);
                        goto err;
                }

                vm = i915_gem_context_get_vm_rcu(c);

                client[i].scratch[0] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].scratch[1] = create_scratch(vm, 1024);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                        i915_vm_put(vm);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].ctx = c;
                i915_vm_put(vm);
        }
        for_each_engine(engine, gt, id) {
                if (!engine->kernel_context->vm)
                        continue;

                if (!whitelist_writable_count(engine))
                        continue;

                /* Read default values */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[0]);
                if (err)
                        goto err;

                /* Try to overwrite registers (should only affect ctx0) */
                err = scrub_whitelisted_registers(client[0].ctx, engine);
                if (err)
                        goto err;

                /* Read values from ctx1, we expect these to be defaults */
                err = read_whitelisted_registers(client[1].ctx, engine,
                                                 client[1].scratch[0]);
                if (err)
                        goto err;

                /* Verify that both reads return the same default values */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[1].scratch[0],
                                                  result_eq);
                if (err)
                        goto err;

                /* Read back the updated values in ctx0 */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[1]);
                if (err)
                        goto err;

                /* User should be granted privilege to overwrite regs */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[0].scratch[1],
                                                  result_neq);
                if (err)
                        goto err;
        }

err:
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                if (!client[i].ctx)
                        break;

                i915_vma_unpin_and_release(&client[i].scratch[1], 0);
                i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                kernel_context_close(client[i].ctx);
        }

        if (igt_flush_test(gt->i915))
                err = -EIO;

        return err;
}

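/*
 * verify_wa_lists() re-checks the GT, engine and context workaround lists
 * against the reference copies captured by reference_lists_init(), and is
 * called before and after each reset flavour below.
 */
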
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
                const char *str)
{
        struct drm_i915_private *i915 = ctx->i915;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        bool ok = true;

        ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

        for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
                enum intel_engine_id id = ce->engine->id;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].ctx_wa_list,
                                            str) == 0;
        }

        return ok;
}

static int
live_gpu_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_gem_context *ctx;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(gt))
                return 0;

        ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        i915_gem_context_lock_engines(ctx);

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        ok = verify_wa_lists(ctx, &lists, "before reset");
        if (!ok)
                goto out;

        intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

        ok = verify_wa_lists(ctx, &lists, "after reset");

out:
        i915_gem_context_unlock_engines(ctx);
        kernel_context_close(ctx);
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);

        return ok ? 0 : -ESRCH;
}

static int
live_engine_reset_workarounds(void *arg)
{
        struct intel_gt *gt = arg;
        struct i915_gem_engines_iter it;
        struct i915_gem_context *ctx;
        struct intel_context *ce;
        struct igt_spinner spin;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(gt))
                return 0;

        ctx = kernel_context(gt->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);

        reference_lists_init(gt, &lists);

        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                struct intel_engine_cs *engine = ce->engine;
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_wa_lists(ctx, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds");

                ok = verify_wa_lists(ctx, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, engine->gt);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                ret = request_add_spin(rq, &spin);
                if (ret) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        goto err;
                }

                intel_engine_reset(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_wa_lists(ctx, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }
err:
        i915_gem_context_unlock_engines(ctx);
        reference_lists_fini(gt, &lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);
        kernel_context_close(ctx);

        igt_flush_test(gt->i915);

        return ret;
}

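/*
 * Entry point: the subtests above are registered with the i915 selftest
 * framework here and are normally driven through the driver's live selftest
 * machinery (for example via IGT) rather than called directly.
 */
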
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
                SUBTEST(live_gpu_reset_workarounds),
                SUBTEST(live_engine_reset_workarounds),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return intel_gt_live_subtests(tests, &i915->gt);
}