/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"

#include "igt_wedge_me.h"
#include "mock_context.h"
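
/*
 * Build and submit a request on @engine that uses SRM
 * (MI_STORE_REGISTER_MEM) to copy the current contents of each
 * RING_FORCE_TO_NONPRIV whitelist slot into a freshly allocated
 * page, one u32 per slot.
 */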
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	const u32 base = engine->mmio_base;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_level(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++; /* gen8+ SRM needs an extra dword for the 64b address */

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_gem_object_get(result);
	i915_gem_object_set_active_reference(result);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}
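
/*
 * Slots beyond w->count are expected to hold the default RING_NOPID
 * value, so fold that into the per-slot expectation.
 */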
static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
{
	return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
}
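
/* Dump the expected vs actual value of every slot to aid debugging. */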
static void print_results(const struct whitelist *w, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(w, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}
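
/*
 * Read back the whitelist slots via read_nonprivs() and verify each
 * one against its expected value.
 */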
static int check_whitelist(const struct whitelist *w,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct igt_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	if (i915_terminally_wedged(&ctx->i915->gpu_error))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(w, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(w, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}
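
/* Perform a full device reset, marking @engine as the stalled engine. */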
static int do_device_reset(struct intel_engine_cs *engine)
{
	i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
	return 0;
}
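
/* Reset only @engine, leaving the rest of the device untouched. */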
static int do_engine_reset(struct intel_engine_cs *engine)
{
	return i915_reset_engine(engine, NULL);
}
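
/*
 * Switch the engine away to a throwaway kernel context, so that the
 * subsequent reset is not performed on top of the context under test.
 */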
static int switch_to_scratch_context(struct intel_engine_cs *engine)
{
	struct i915_gem_context *ctx;
	struct i915_request *rq;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	rq = i915_request_alloc(engine, ctx);
	kernel_context_close(ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_add(rq);

	return 0;
}
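
/*
 * Core of the test: verify the whitelist before a reset, perform the
 * given reset while a scratch context is active, then verify that the
 * whitelist survives both in the original context and in a fresh
 * context created after the reset.
 */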
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const struct whitelist *w,
					const char *name)
{
	struct i915_gem_context *ctx;
	int err;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = check_whitelist(w, ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out;
	}

	err = switch_to_scratch_context(engine);
	if (err)
		goto out;

	err = reset(engine);
	if (err) {
		pr_err("%s reset failed\n", name);
		goto out;
	}

	err = check_whitelist(w, ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out;
	}

	kernel_context_close(ctx);

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = check_whitelist(w, ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out;
	}

out:
	kernel_context_close(ctx);
	return err;
}
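
/*
 * Exercise the whitelist across both per-engine and full-device
 * resets, where the hardware supports them.
 */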
static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS];
	struct i915_gpu_error *error = &i915->gpu_error;
	struct whitelist w;
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine)
		return 0;

	if (!whitelist_build(engine, &w))
		return 0;

	pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);

	set_bit(I915_RESET_BACKOFF, &error->flags);
	set_bit(I915_RESET_ENGINE + engine->id, &error->flags);

	if (intel_has_reset_engine(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset, &w,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(i915)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset, &w,
						   "device");
		if (err)
			goto out;
	}

out:
	clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
	clear_bit(I915_RESET_BACKOFF, &error->flags);
	return err;
}
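
/*
 * Selftest entry point: runs the live tests above under struct_mutex,
 * skipping if the device is already terminally wedged.
 */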
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_reset_whitelist),
	};
	int err;

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}