/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
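
/*
 * Note: like the other i915 live selftests, this file is expected to be
 * built as part of i915_active.c (guarded by CONFIG_DRM_I915_SELFTEST),
 * since the debug helpers at the bottom rely on symbols private to that
 * file (struct active_node, is_barrier(), __barrier_to_engine()).
 */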

struct live_active {
        struct i915_active base;
        struct kref ref;
        bool retired;
};

static void __live_get(struct live_active *active)
{
        kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
        i915_active_fini(&active->base);
        kfree(active);
}

static void __live_release(struct kref *ref)
{
        struct live_active *active = container_of(ref, typeof(*active), ref);

        __live_free(active);
}

static void __live_put(struct live_active *active)
{
        kref_put(&active->ref, __live_release);
}

static int __live_active(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        __live_get(active);
        return 0;
}

static void __live_retire(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        active->retired = true;
        __live_put(active);
}

static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
        struct live_active *active;

        active = kzalloc(sizeof(*active), GFP_KERNEL);
        if (!active)
                return NULL;

        kref_init(&active->ref);
        i915_active_init(&active->base, __live_active, __live_retire);

        return active;
}
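
/*
 * Common fixture: submit a kernel request on every uabi engine, all held
 * back by a single heap fence, and track each request in the i915_active.
 * The callers then check when and how the active is retired. Returns the
 * fixture on success or an ERR_PTR on failure.
 */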
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        struct i915_sw_fence *submit;
        struct live_active *active;
        unsigned int count = 0;
        int err = 0;

        active = __live_alloc(i915);
        if (!active)
                return ERR_PTR(-ENOMEM);

        submit = heap_fence_create(GFP_KERNEL);
        if (!submit) {
                kfree(active);
                return ERR_PTR(-ENOMEM);
        }

        err = i915_active_acquire(&active->base);
        if (err)
                goto out;

        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;

                rq = intel_engine_create_kernel_request(engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        break;
                }

                err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                       submit,
                                                       GFP_KERNEL);
                if (err >= 0)
                        err = i915_active_add_request(&active->base, rq);
                i915_request_add(rq);
                if (err) {
                        pr_err("Failed to track active ref!\n");
                        break;
                }

                count++;
        }

        i915_active_release(&active->base);
        if (READ_ONCE(active->retired) && count) {
                pr_err("i915_active retired before submission!\n");
                err = -EINVAL;
        }
        if (atomic_read(&active->base.count) != count) {
                pr_err("i915_active not tracking all requests, found %d, expected %d\n",
                       atomic_read(&active->base.count), count);
                err = -EINVAL;
        }

out:
        i915_sw_fence_commit(submit);
        heap_fence_put(submit);
        if (err) {
                __live_put(active);
                active = ERR_PTR(err);
        }

        return active;
}

static int live_active_wait(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests retire upon waiting */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        i915_active_wait(&active->base);
        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after waiting!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        if (igt_flush_test(i915))
                err = -EIO;

        return err;
}

static int live_active_retire(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests are indirectly retired */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        /* waits for & retires all requests */
        if (igt_flush_test(i915))
                err = -EIO;

        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after flushing!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        return err;
}

int i915_active_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_active_wait),
                SUBTEST(live_active_retire),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return i915_subtests(tests, i915);
}
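
/*
 * Everything below is not a subtest but a pair of debug helpers exposed
 * while selftests are enabled: i915_active_print() dumps the tracking
 * state of an i915_active, and i915_active_unlock_wait() flushes any
 * concurrent users before teardown. The live subtests above are normally
 * driven through the i915 selftest harness (e.g. the i915.live_selftests
 * module parameter), though the exact invocation depends on the config.
 */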

static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
        struct intel_engine_cs *engine;

        if (!is_barrier(&it->base))
                return NULL;

        engine = __barrier_to_engine(it);
        smp_rmb(); /* serialise with add_active_barriers */
        if (!is_barrier(&it->base))
                return NULL;

        return engine;
}
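
/* Dump the bookkeeping of an i915_active, e.g. when a subtest fails to retire */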
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
        drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
        drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
        drm_printf(m, "\tpreallocated barriers? %s\n",
                   yesno(!llist_empty(&ref->preallocated_barriers)));

        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                        struct intel_engine_cs *engine;

                        engine = node_to_barrier(it);
                        if (engine) {
                                drm_printf(m, "\tbarrier: %s\n", engine->name);
                                continue;
                        }

                        if (i915_active_fence_isset(&it->base)) {
                                drm_printf(m,
                                           "\ttimeline: %llx\n", it->timeline);
                                continue;
                        }
                }

                i915_active_release(ref);
        }
}

static void spin_unlock_wait(spinlock_t *lock)
{
        spin_lock_irq(lock);
        spin_unlock_irq(lock);
}
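
/*
 * Wait until every concurrent user of the active's fence callback locks and
 * its retire worker has finished, so the caller can safely proceed to tear
 * the i915_active down.
 */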
void i915_active_unlock_wait(struct i915_active *ref)
{
        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                rcu_read_lock();
                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                        struct dma_fence *f;

                        /* Wait for all active callbacks */
                        f = rcu_dereference(it->base.fence);
                        if (f)
                                spin_unlock_wait(f->lock);
                }
                rcu_read_unlock();

                i915_active_release(ref);
        }

        /* And wait for the retire callback */
        spin_lock_irq(&ref->tree_lock);
        spin_unlock_irq(&ref->tree_lock);

        /* ... which may have been on a thread instead */
        flush_work(&ref->work);
}