/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
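
/*
 * Submit @rq and wait up to HZ/10 for it to complete, then retire the
 * timeline up to that request. The request's timeline mutex is held
 * throughout, which is why i915_request_add() is opencoded below.
 */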
static int request_sync(struct i915_request *rq)
{
        struct intel_timeline *tl = i915_request_timeline(rq);
        long timeout;
        int err = 0;

        intel_timeline_get(tl);
        i915_request_get(rq);

        /* Opencode i915_request_add() so we can keep the timeline locked. */
        __i915_request_commit(rq);
        __i915_request_queue(rq, NULL);

        timeout = i915_request_wait(rq, 0, HZ / 10);
        if (timeout < 0)
                err = timeout;
        else
                i915_request_retire_upto(rq);

        lockdep_unpin_lock(&tl->mutex, rq->cookie);
        mutex_unlock(&tl->mutex);

        i915_request_put(rq);
        intel_timeline_put(tl);

        return err;
}
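
/*
 * Drain @ce's timeline: wait for (and retire) the last request queued on
 * it, repeating until the list is empty or a wait times out.
 */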
static int context_sync(struct intel_context *ce)
{
        struct intel_timeline *tl = ce->timeline;
        int err = 0;

        mutex_lock(&tl->mutex);
        do {
                struct i915_request *rq;
                long timeout;

                if (list_empty(&tl->requests))
                        break;

                rq = list_last_entry(&tl->requests, typeof(*rq), link);
                i915_request_get(rq);

                timeout = i915_request_wait(rq, 0, HZ / 10);
                if (timeout < 0)
                        err = timeout;
                else
                        i915_request_retire_upto(rq);

                i915_request_put(rq);
        } while (!err);
        mutex_unlock(&tl->mutex);

        return err;
}
static int __live_context_size(struct intel_engine_cs *engine)
{
        struct intel_context *ce;
        struct i915_request *rq;
        void *vaddr;
        int err;

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        err = intel_context_pin(ce);
        if (err)
                goto err;

        vaddr = i915_gem_object_pin_map(ce->state->obj,
                                        i915_coherent_map_type(engine->i915));
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                intel_context_unpin(ce);
                goto err;
        }

        /*
         * Note that execlists also applies a redzone which it checks on
         * context unpin when debugging. We are using the same location
         * and same poison value so that our checks overlap. Despite the
         * redundancy, we want to keep this little selftest so that we
         * get coverage of any and all submission backends, and we can
         * always extend this test to ensure we trick the HW into a
         * compromising position wrt the various sections that need
         * to be written into the context state.
         *
         * TLDR; this overlaps with the execlists redzone.
         */
        vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
        memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);

        rq = intel_context_create_request(ce);
        intel_context_unpin(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        err = request_sync(rq);
        if (err)
                goto err_unpin;

        /* Force the context switch */
        rq = intel_engine_create_kernel_request(engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }
        err = request_sync(rq);
        if (err)
                goto err_unpin;

        if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
                pr_err("%s context overwrote trailing red-zone!", engine->name);
                err = -EINVAL;
        }

err_unpin:
        i915_gem_object_unpin_map(ce->state->obj);
err:
        intel_context_put(ce);
        return err;
}

static int live_context_size(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /*
         * Check that our context sizes are correct by seeing if the
         * HW tries to write past the end of one.
         */

        for_each_engine(engine, gt, id) {
                struct {
                        struct drm_i915_gem_object *state;
                        void *pinned;
                } saved;

                if (!engine->context_size)
                        continue;

                intel_engine_pm_get(engine);

                /*
                 * Hide the old default state -- we lie about the context size
                 * and get confused when the default state is smaller than
                 * expected. For our do nothing request, inheriting the
                 * active state is sufficient, we are only checking that we
                 * don't use more than we planned.
                 */
                saved.state = fetch_and_zero(&engine->default_state);
                saved.pinned = fetch_and_zero(&engine->pinned_default_state);

                /* Overlaps with the execlists redzone */
                engine->context_size += I915_GTT_PAGE_SIZE;

                err = __live_context_size(engine);

                engine->context_size -= I915_GTT_PAGE_SIZE;

                engine->pinned_default_state = saved.pinned;
                engine->default_state = saved.state;

                intel_engine_pm_put(engine);

                if (err)
                        break;
        }

        return err;
}
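
/*
 * Each pass below submits a request on an otherwise idle engine, then
 * checks that ce->active has not yet idled (the context must stay active
 * until the idle-barrier runs) and that the engine is still awake.
 */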
static int __live_active_context(struct intel_engine_cs *engine)
{
        unsigned long saved_heartbeat;
        struct intel_context *ce;
        int pass;
        int err;

        /*
         * We keep active contexts alive until after a subsequent context
         * switch as the final write from the context-save will be after
         * we retire the final request. We track when we unpin the context,
         * under the presumption that the final pin is from the last request,
         * and instead of immediately unpinning the context, we add a task
         * to unpin the context from the next idle-barrier.
         *
         * This test makes sure that the context is kept alive until a
         * subsequent idle-barrier (emitted when the engine wakeref hits 0
         * with no more outstanding requests).
         */

        if (intel_engine_pm_is_awake(engine)) {
                pr_err("%s is awake before starting %s!\n",
                       engine->name, __func__);
                return -EINVAL;
        }

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        saved_heartbeat = engine->props.heartbeat_interval_ms;
        engine->props.heartbeat_interval_ms = 0;

        for (pass = 0; pass <= 2; pass++) {
                struct i915_request *rq;

                intel_engine_pm_get(engine);

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_engine;
                }

                err = request_sync(rq);
                if (err)
                        goto out_engine;

                /* Context will be kept active until after an idle-barrier. */
                if (i915_active_is_idle(&ce->active)) {
                        pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
                               engine->name, pass);
                        err = -EINVAL;
                        goto out_engine;
                }

                if (!intel_engine_pm_is_awake(engine)) {
                        pr_err("%s is asleep before idle-barrier\n",
                               engine->name);
                        err = -EINVAL;
                        goto out_engine;
                }

out_engine:
                intel_engine_pm_put(engine);
                if (err)
                        goto err;
        }

        /* Now make sure our idle-barriers are flushed */
        err = intel_engine_flush_barriers(engine);
        if (err)
                goto err;

        /* Wait for the barrier and in the process wait for engine to park */
        err = context_sync(engine->kernel_context);
        if (err)
                goto err;

        if (!i915_active_is_idle(&ce->active)) {
                pr_err("context is still active!");
                err = -EINVAL;
        }

        intel_engine_pm_flush(engine);

        if (intel_engine_pm_is_awake(engine)) {
                struct drm_printer p = drm_debug_printer(__func__);

                intel_engine_dump(engine, &p,
                                  "%s is still awake:%d after idle-barriers\n",
                                  engine->name,
                                  atomic_read(&engine->wakeref.count));
                GEM_TRACE_DUMP();

                err = -EINVAL;
        }

err:
        engine->props.heartbeat_interval_ms = saved_heartbeat;
        intel_context_put(ce);
        return err;
}

static int live_active_context(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        for_each_engine(engine, gt, id) {
                err = __live_active_context(engine);
                if (err)
                        break;

                err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }

        return err;
}
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
        struct i915_request *rq;
        int err;

        err = intel_context_pin(remote);
        if (err)
                return err;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin;
        }

        err = intel_context_prepare_remote_request(remote, rq);
        if (err) {
                i915_request_add(rq);
                goto unpin;
        }

        err = request_sync(rq);

unpin:
        intel_context_unpin(remote);
        return err;
}

static int __live_remote_context(struct intel_engine_cs *engine)
{
        struct intel_context *local, *remote;
        unsigned long saved_heartbeat;
        int pass;
        int err;

        /*
         * Check that our idle barriers do not interfere with normal
         * activity tracking. In particular, check that operating
         * on the context image remotely (intel_context_prepare_remote_request),
         * which inserts foreign fences into intel_context.active, does not
         * clobber the idle-barrier.
         */

        if (intel_engine_pm_is_awake(engine)) {
                pr_err("%s is awake before starting %s!\n",
                       engine->name, __func__);
                return -EINVAL;
        }

        remote = intel_context_create(engine);
        if (IS_ERR(remote))
                return PTR_ERR(remote);

        local = intel_context_create(engine);
        if (IS_ERR(local)) {
                err = PTR_ERR(local);
                goto err_remote;
        }

        saved_heartbeat = engine->props.heartbeat_interval_ms;
        engine->props.heartbeat_interval_ms = 0;
        intel_engine_pm_get(engine);

        for (pass = 0; pass <= 2; pass++) {
                err = __remote_sync(local, remote);
                if (err)
                        break;

                err = __remote_sync(engine->kernel_context, remote);
                if (err)
                        break;

                if (i915_active_is_idle(&remote->active)) {
                        pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
                               engine->name, pass);
                        err = -EINVAL;
                        break;
                }
        }

        intel_engine_pm_put(engine);
        engine->props.heartbeat_interval_ms = saved_heartbeat;

        intel_context_put(local);
err_remote:
        intel_context_put(remote);

        return err;
}

static int live_remote_context(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        for_each_engine(engine, gt, id) {
                err = __live_remote_context(engine);
                if (err)
                        break;

                err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }

        return err;
}
int intel_context_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_context_size),
                SUBTEST(live_active_context),
                SUBTEST(live_remote_context),
        };
        struct intel_gt *gt = &i915->gt;

        if (intel_gt_is_wedged(gt))
                return 0;

        return intel_gt_live_subtests(tests, gt);
}