drivers/gpu/drm/i915/gt/selftest_engine_pm.c

/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2018 Intel Corporation
 */
#include "i915_selftest.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
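
/*
 * Live selftests for engine runtime pm: check that the per-engine
 * busy-stats report believable numbers while idle and while spinning,
 * and that engine wakerefs can be taken and released from any context,
 * including atomic, without upsetting lockdep. (Like the other live
 * selftests, these are expected to be run by loading i915 with the
 * live selftests module parameter enabled.)
 */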

static int live_engine_busy_stats(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_spinner spin;
	int err = 0;

	/*
	 * Check that if an engine supports busy-stats, they tell the truth.
	 */

	if (igt_spinner_init(&spin, gt))
		return -ENOMEM;
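
	/* Every engine must already be parked, so the GT should be asleep. */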
	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		struct i915_request *rq;
		ktime_t de, dt;
		ktime_t t[2];

		if (!intel_engine_supports_stats(engine))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (intel_gt_pm_wait_for_idle(gt)) {
			err = -EBUSY;
			break;
		}

		st_engine_heartbeat_disable(engine);
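
		/*
		 * With the heartbeat paused, nothing should be executing on
		 * this engine: sample the busy time twice across a short
		 * preempt-disabled delay and expect the delta to be ~0.
		 */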
		ENGINE_TRACE(engine, "measuring idle time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
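		/* While idle, tolerate no more than 10ns of reported busyness. */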
		if (de < 0 || de > 10) {
			pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

		/* 100% busy */
		rq = igt_spinner_create_request(&spin,
						engine->kernel_context,
						MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto end;
		}
		i915_request_add(rq);
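
		/*
		 * If the spinner never starts, the engine is hung: wedge the
		 * GT to cancel the stuck request before reporting the error.
		 */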
		if (!igt_wait_for_spinner(&spin, rq)) {
			intel_gt_set_wedged(engine->gt);
			err = -ETIME;
			goto end;
		}

		ENGINE_TRACE(engine, "measuring busy time\n");
		preempt_disable();
		de = intel_engine_get_busy_time(engine, &t[0]);
		udelay(100);
		de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
		preempt_enable();
		dt = ktime_sub(t[1], t[0]);
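		/* While spinning, the report should match wall time to within ~5%. */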
		if (100 * de < 95 * dt || 95 * de > 100 * dt) {
			pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
			       engine->name,
			       de, (int)div64_u64(100 * de, dt), dt);
			GEM_TRACE_DUMP();
			err = -EINVAL;
			goto end;
		}

end:
		st_engine_heartbeat_enable(engine);
		igt_spinner_end(&spin);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			break;
	}

	igt_spinner_fini(&spin);
	if (igt_flush_test(gt->i915))
		err = -EIO;
	return err;
}

static int live_engine_pm(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check we can call intel_engine_pm_put from any context. No
	 * failures are reported directly, but if we mess up lockdep should
	 * tell us.
	 */
	if (intel_gt_pm_wait_for_idle(gt)) {
		pr_err("Unable to flush GT pm before test\n");
		return -EBUSY;
	}

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	for_each_engine(engine, gt, id) {
		const typeof(*igt_atomic_phases) *p;
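
		/*
		 * igt_atomic_phases walks through the different atomic
		 * contexts; each phase brackets the test in its own
		 * critical_section_begin/end pair.
		 */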
		for (p = igt_atomic_phases; p->name; p++) {
			/*
			 * Acquisition is always synchronous, except if we
			 * know that the engine is already awake, in which
			 * case we should use intel_engine_pm_get_if_awake()
			 * to atomically grab the wakeref.
			 *
			 * In practice,
			 *    intel_engine_pm_get();
			 *    intel_engine_pm_put();
			 * occurs in one thread, while simultaneously
			 *    intel_engine_pm_get_if_awake();
			 *    intel_engine_pm_put();
			 * occurs from atomic context in another.
			 */
			GEM_BUG_ON(intel_engine_pm_is_awake(engine));
			intel_engine_pm_get(engine);

			p->critical_section_begin();
			if (!intel_engine_pm_get_if_awake(engine))
				pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
				       engine->name, p->name);
			else
				intel_engine_pm_put_async(engine);
			intel_engine_pm_put_async(engine);
			p->critical_section_end();
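
			/*
			 * The puts above are asynchronous; flush them so we
			 * can reliably check that the engine has parked.
			 */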
			intel_engine_pm_flush(engine);

			if (intel_engine_pm_is_awake(engine)) {
				pr_err("%s is still awake after flushing pm\n",
				       engine->name);
				return -EINVAL;
			}

			/* gt wakeref is async (deferred to workqueue) */
			if (intel_gt_pm_wait_for_idle(gt)) {
				pr_err("GT failed to idle\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

int live_engine_pm_selftests(struct intel_gt *gt)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_engine_busy_stats),
		SUBTEST(live_engine_pm),
	};

	return intel_gt_live_subtests(tests, gt);
}