/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_rc6.h"

#include "selftests/i915_random.h"
int live_rc6_manual(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_rc6 *rc6 = &gt->rc6;
        intel_wakeref_t wakeref;
        u64 res[2];
        int err = 0;
        /*
         * Our claim is that we can "encourage" the GPU to enter rc6 at will.
         */
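        /*
         * To show this, we first force rc6 off and verify that the
         * GEN6_GT_GFX_RC6 residency counter does not advance, then park
         * the GT into rc6 and verify that the counter starts ticking again.
         */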
        /* bsw/byt use a PCU and decouple RC6 from our manual control */
        if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
                return 0;

        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
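        /*
         * The wakeref taken above keeps the device awake for the duration
         * of the test; it is released at out_unlock before returning.
         */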
        /* Force RC6 off for starters */
        __intel_rc6_disable(rc6);
        msleep(1); /* wakeup is not immediate, takes about 100us on icl */
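        /*
         * Sample the rc6 residency counter twice, 250ms apart, with rc6
         * disabled. The counter is reported in nanoseconds, so any non-zero
         * delta >> 10 (~microseconds) means rc6 was entered behind our back.
         */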
        res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
        msleep(250);
        res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
        if ((res[1] - res[0]) >> 10) {
                pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
                       (res[1] - res[0]) >> 10);
                err = -EINVAL;
                goto out_unlock;
        }
        /* Manually enter RC6 */
        intel_rc6_park(rc6);

        res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
        msleep(100); /* let the residency counter accumulate */
        res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
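        /*
         * Now that the GT is parked in rc6, the residency counter should be
         * ticking; two identical samples mean we never actually entered rc6.
         */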
        if (res[1] == res[0]) {
                pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
                       intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
                       intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
                err = -EINVAL;
        }
        /* Restore what should have been the original state! */
        intel_rc6_unpark(rc6);

out_unlock:
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        return err;
}
static const u32 *__live_rc6_ctx(struct intel_context *ce)
{
        struct i915_request *rq;
        const u32 *result;
        u32 cmd;
        u32 *cs;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
                return ERR_CAST(rq);

        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
                i915_request_add(rq);
                return cs;
        }
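        /*
         * Emit an MI_STORE_REGISTER_MEM that copies GEN8_RC6_CTX_INFO into
         * the GGTT, landing in this context's HWSP page. On gen8+ the
         * command takes a 64-bit address, hence the extra length dword
         * (cmd++) and the zeroed upper-address dword.
         */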
        cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
        if (INTEL_GEN(rq->i915) >= 8)
                cmd++;

        *cs++ = cmd;
        *cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
        *cs++ = ce->timeline->hwsp_offset + 8;
        *cs++ = 0;
        intel_ring_advance(rq, cs);
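        /*
         * The SRM target above (hwsp_offset + 8) is the GGTT address two
         * dwords past this timeline's seqno slot in the HWSP; hwsp_seqno + 2
         * is the CPU pointer to that same dword, which the caller reads with
         * READ_ONCE() once the request has executed.
         */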
        result = rq->hwsp_seqno + 2;
        i915_request_add(rq);

        return result;
}
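/*
 * Build an array of all engines on this GT in a randomised order, so the
 * test does not depend on any particular engine ordering. Returns NULL if
 * there are no engines or the allocation fails; *count is set to the
 * number of engines returned.
 */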
static struct intel_engine_cs **
randomised_engines(struct intel_gt *gt,
                   struct rnd_state *prng,
                   unsigned int *count)
{
        struct intel_engine_cs *engine, **engines;
        enum intel_engine_id id;
        int n;
        n = 0;
        for_each_engine(engine, gt, id)
                n++;
        if (!n)
                return NULL;

        engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
        if (!engines)
                return NULL;
        n = 0;
        for_each_engine(engine, gt, id)
                engines[n++] = engine;

        i915_prandom_shuffle(engines, sizeof(*engines), n, prng);

        *count = n;
        return engines;
}
int live_rc6_ctx_wa(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs **engines;
        unsigned int n, count;
        I915_RND_STATE(prng);
        int err = 0;
        /* A read of CTX_INFO upsets rc6. Poke the bear! */
        if (INTEL_GEN(gt->i915) < 8)
                return 0;

        engines = randomised_engines(gt, &prng, &count);
        if (!engines)
                return 0;
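        /*
         * For each engine, in randomised order, run two passes: submit a
         * sacrificial context that reads CTX_INFO, let the GT park back
         * into rc6, and then verify that no engine reset was needed to
         * recover from poking the bear.
         */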
        for (n = 0; n < count; n++) {
                struct intel_engine_cs *engine = engines[n];
                int pass;
                for (pass = 0; pass < 2; pass++) {
                        struct intel_context *ce;
                        unsigned int resets =
                                i915_reset_engine_count(&gt->i915->gpu_error,
                                                        engine);
                        const u32 *res;
                        /* Use a sacrificial context */
                        ce = intel_context_create(engine);
                        if (IS_ERR(ce)) {
                                err = PTR_ERR(ce);
                                goto out;
                        }

                        intel_engine_pm_get(engine);
                        res = __live_rc6_ctx(ce);
                        intel_engine_pm_put(engine);
                        intel_context_put(ce);
                        if (IS_ERR(res)) {
                                err = PTR_ERR(res);
                                goto out;
                        }
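                        /*
                         * Wait for the request to complete and for the GT to
                         * park (re-entering rc6) before inspecting the value
                         * the GPU wrote for CTX_INFO.
                         */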
                        if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
                                intel_gt_set_wedged(gt);
                                err = -ETIME;
                                goto out;
                        }

                        intel_gt_pm_wait_for_idle(gt);
                        pr_debug("%s: CTX_INFO=%0x\n",
                                 engine->name, READ_ONCE(*res));
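                        /*
                         * If handling the sacrificial context required a GPU
                         * reset, the workaround failed: shout about it and
                         * taint the kernel so CI flags the run.
                         */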
                        if (resets !=
                            i915_reset_engine_count(&gt->i915->gpu_error,
                                                    engine)) {
                                pr_err("%s: GPU reset required\n",
                                       engine->name);
                                add_taint_for_CI(TAINT_WARN);