// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/crc32.h>

#include "gem/i915_gem_stolen.h"

#include "i915_memcpy.h"
#include "i915_selftest.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
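
/*
 * Keep the engines busy with spinners, CRC every page of stolen memory,
 * perform the requested reset, then CRC the pages again. Pages that are not
 * tracked in the stolen drm_mm are pre-filled with STACK_MAGIC so that any
 * modification by the reset itself is visible; a clobber at or above the
 * reserved low region (I915_GEM_STOLEN_BIAS) is reported as an error.
 */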
static int
__igt_reset_stolen(struct intel_gt *gt,
		   intel_engine_mask_t mask,
		   const char *msg)
{
	struct i915_ggtt *ggtt = &gt->i915->ggtt;
	const struct resource *dsm = &gt->i915->dsm;
	resource_size_t num_pages, page;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct igt_spinner spin;
	long max, count;
	void *tmp;
	u32 *crc;
	int err;

	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return 0;

	num_pages = resource_size(dsm) >> PAGE_SHIFT;
	if (!num_pages)
		return 0;

	crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
	if (!crc)
		return -ENOMEM;

	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto err_crc;
	}

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = igt_spinner_init(&spin, gt);
	if (err)
		goto err_lock;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct i915_request *rq;

		if (!(mask & engine->mask))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto err_spin;
		}

		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_spin;
		}

		i915_request_add(rq);
	}

	/* Record the CRC of every page of stolen before the reset. */
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1))
			memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
			in = tmp;
		crc[page] = crc32_le(0, in, PAGE_SIZE);

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	if (mask == ALL_ENGINES) {
		intel_gt_reset(gt, mask, NULL);
	} else {
		for_each_engine(engine, gt, id) {
			if (mask & engine->mask)
				intel_engine_reset(engine, NULL);
		}
	}

	max = -1;
	count = 0;
	/* Compare the CRC of every page of stolen after the reset. */
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;
		u32 x;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
			in = tmp;
		x = crc32_le(0, in, PAGE_SIZE);

		if (x != crc[page] &&
		    !__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1)) {
			pr_debug("unused stolen page %pa modified by GPU reset\n",
				 &page);
			if (count++ == 0)
				igt_hexdump(in, PAGE_SIZE);
			max = page;
		}

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	if (count > 0) {
		pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
			msg, count, max);
	}
	if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
		pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
		       msg, I915_GEM_STOLEN_BIAS);
		err = -EINVAL;
	}

err_spin:
	igt_spinner_fini(&spin);

err_lock:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	kfree(tmp);
err_crc:
	kfree(crc);
	return err;
}
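
/* Exercise a full device reset and check that stolen memory is untouched. */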
static int igt_reset_device_stolen(void *arg)
{
	return __igt_reset_stolen(arg, ALL_ENGINES, "device");
}
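
/* Repeat the stolen check for per-engine resets, when they are supported. */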
static int igt_reset_engines_stolen(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		err = __igt_reset_stolen(gt, engine->mask, engine->name);
		if (err)
			return err;
	}

	return 0;
}

static int igt_global_reset(void *arg)
{
	struct intel_gt *gt = arg;
	unsigned int reset_count;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we can issue a global GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reset_count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, ALL_ENGINES, NULL);

	if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
	}

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	return err;
}

static int igt_wedged_reset(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;

	/* Check that we can recover a wedged device with a GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	intel_gt_set_wedged(gt);

	GEM_BUG_ON(!intel_gt_is_wedged(gt));
	intel_gt_reset(gt, ALL_ENGINES, NULL);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

static int igt_atomic_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		intel_engine_mask_t awake;

		GEM_TRACE("__intel_gt_reset under %s\n", p->name);

		awake = reset_prepare(gt);
		p->critical_section_begin();

		err = __intel_gt_reset(gt, ALL_ENGINES);

		p->critical_section_end();
		reset_finish(gt, awake);

		if (err) {
			pr_err("__intel_gt_reset failed under %s\n", p->name);
			break;
		}
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}

static int igt_atomic_engine_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto out_unlock;

	for_each_engine(engine, gt, id) {
		tasklet_disable(&engine->execlists.tasklet);
		intel_engine_pm_get(engine);

		for (p = igt_atomic_phases; p->name; p++) {
			GEM_TRACE("intel_engine_reset(%s) under %s\n",
				  engine->name, p->name);

			p->critical_section_begin();
			err = intel_engine_reset(engine, NULL);
			p->critical_section_end();

			if (err) {
				pr_err("intel_engine_reset(%s) failed under %s\n",
				       engine->name, p->name);
				break;
			}
		}

		intel_engine_pm_put(engine);
		tasklet_enable(&engine->execlists.tasklet);
		if (err)
			break;
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

out_unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}

int intel_reset_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
		SUBTEST(igt_reset_device_stolen),
		SUBTEST(igt_reset_engines_stolen),
		SUBTEST(igt_wedged_reset),
		SUBTEST(igt_atomic_reset),
		SUBTEST(igt_atomic_engine_reset),
	};
	struct intel_gt *gt = &i915->gt;

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	return intel_gt_live_subtests(tests, gt);
}