/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
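/*
 * live_active wraps an i915_active in a kref-managed allocation so the
 * selftests can watch its lifecycle: @retired is set by the retirement
 * callback once every tracked request has completed.
 */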
struct live_active {
        struct i915_active base;
        struct kref ref;
        bool retired;
};
static void __live_get(struct live_active *active)
{
        kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
        i915_active_fini(&active->base);
        kfree(active);
}

static void __live_release(struct kref *ref)
{
        struct live_active *active = container_of(ref, typeof(*active), ref);

        __live_free(active);
}

static void __live_put(struct live_active *active)
{
        kref_put(&active->ref, __live_release);
}
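/*
 * i915_active callbacks: __live_active() runs when the tracker first becomes
 * busy and takes an extra kref on the wrapper; __live_retire() runs once the
 * last tracked request has retired, marking @retired and dropping that kref.
 */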
static int __live_active(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        __live_get(active);
        return 0;
}

static void __live_retire(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        active->retired = true;
        __live_put(active);
}
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
        struct live_active *active;

        active = kzalloc(sizeof(*active), GFP_KERNEL);
        if (!active)
                return NULL;

        kref_init(&active->ref);
        i915_active_init(&active->base, __live_active, __live_retire);

        return active;
}
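/*
 * __live_active_setup() queues a kernel request on every uabi engine, each
 * gated behind a single heap sw_fence so that none is submitted until we
 * commit it, and tracks every request in the i915_active. On return the
 * tracker must not have retired early and must account for all requests.
 */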
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        struct i915_sw_fence *submit;
        struct live_active *active;
        unsigned int count = 0;
        int err = 0;

        active = __live_alloc(i915);
        if (!active)
                return ERR_PTR(-ENOMEM);

        submit = heap_fence_create(GFP_KERNEL);
        if (!submit) {
                kfree(active);
                return ERR_PTR(-ENOMEM);
        }

        err = i915_active_acquire(&active->base);
        if (err)
                goto out;

        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;

                rq = intel_engine_create_kernel_request(engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        break;
                }

                err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                       submit,
                                                       GFP_KERNEL);
                if (err >= 0)
                        err = i915_active_add_request(&active->base, rq);
                i915_request_add(rq);
                if (err) {
                        pr_err("Failed to track active ref!\n");
                        break;
                }

                count++;
        }

        i915_active_release(&active->base);
        if (READ_ONCE(active->retired) && count) {
                pr_err("i915_active retired before submission!\n");
                err = -EINVAL;
        }
        if (atomic_read(&active->base.count) != count) {
                pr_err("i915_active not tracking all requests, found %d, expected %d\n",
                       atomic_read(&active->base.count), count);
                err = -EINVAL;
        }

out:
        i915_sw_fence_commit(submit);
        heap_fence_put(submit);
        if (err) {
                __live_put(active);
                active = ERR_PTR(err);
        }

        return active;
}
static int live_active_wait(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests retire upon waiting */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after waiting!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        if (igt_flush_test(i915))
                err = -EIO;

        return err;
}
static int live_active_retire(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests are indirectly retired */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        /* waits for & retires all requests */
        if (igt_flush_test(i915))
                err = -EIO;

        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after flushing!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        return err;
}
static int live_active_barrier(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when the barriers retire after waiting */

        active = __live_alloc(i915);
        if (!active)
                return -ENOMEM;

        err = i915_active_acquire(&active->base);
        if (err)
                goto out;

        for_each_uabi_engine(engine, i915) {
                err = i915_active_acquire_preallocate_barrier(&active->base,
                                                              engine);
                if (err)
                        break;

                i915_active_acquire_barrier(&active->base);
        }

        i915_active_release(&active->base);
        if (err)
                goto out;

        __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
        if (!READ_ONCE(active->retired)) {
                pr_err("i915_active not retired after flushing barriers!\n");
                err = -EINVAL;
        }

out:
        __live_put(active);

        if (igt_flush_test(i915))
                err = -EIO;

        return err;
}
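/*
 * Entry point for the live i915_active selftests; skipped entirely if the
 * GT is already wedged, since no requests can then be submitted.
 */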
int i915_active_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_active_wait),
                SUBTEST(live_active_retire),
                SUBTEST(live_active_barrier),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return i915_subtests(tests, i915);
}
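/*
 * The helpers below are compiled as part of i915_active.c (this selftest
 * file is #included from there when CONFIG_DRM_I915_SELFTEST is enabled),
 * which is why they may use its static internals such as is_barrier(),
 * __barrier_to_engine() and __active_fence_slot().
 */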
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
        struct intel_engine_cs *engine;

        if (!is_barrier(&it->base))
                return NULL;

        engine = __barrier_to_engine(it);
        smp_rmb(); /* serialise with add_active_barriers */
        if (!is_barrier(&it->base))
                return NULL;

        return engine;
}
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
        drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
        drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
        drm_printf(m, "\tpreallocated barriers? %s\n",
                   yesno(!llist_empty(&ref->preallocated_barriers)));

        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                        struct intel_engine_cs *engine;

                        engine = node_to_barrier(it);
                        if (engine) {
                                drm_printf(m, "\tbarrier: %s\n", engine->name);
                                continue;
                        }

                        if (i915_active_fence_isset(&it->base)) {
                                drm_printf(m,
                                           "\ttimeline: %llx\n", it->timeline);
                                continue;
                        }
                }

                i915_active_release(ref);
        }
}
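/*
 * spin_unlock_wait(): take and immediately drop the lock so that we are
 * ordered after any critical section currently holding it.
 */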
static void spin_unlock_wait(spinlock_t *lock)
{
        spin_lock_irq(lock);
        spin_unlock_irq(lock);
}
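/*
 * active_flush(): steal the fence from the tracking slot and unlink our
 * callback from its cb_list so the retirement callback can no longer run,
 * dropping the tracker's count ourselves. The fence must already have
 * signaled.
 */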
static void active_flush(struct i915_active *ref,
                         struct i915_active_fence *active)
{
        struct dma_fence *fence;

        fence = xchg(__active_fence_slot(active), NULL);
        if (!fence)
                return;

        spin_lock_irq(fence->lock);
        __list_del_entry(&active->cb.node);
        spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
        atomic_dec(&ref->count);

        GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
}
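/*
 * i915_active_unlock_wait(): flush any in-flight fence callbacks and the
 * retirement worker so that, on return, no callback for @ref is still
 * executing.
 */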
void i915_active_unlock_wait(struct i915_active *ref)
{
        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                /* Wait for all active callbacks */
                rcu_read_lock();
                active_flush(ref, &ref->excl);
                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
                        active_flush(ref, &it->base);
                rcu_read_unlock();

                i915_active_release(ref);
        }

        /* And wait for the retire callback */
        spin_unlock_wait(&ref->tree_lock);

        /* ... which may have been on a thread instead */
        flush_work(&ref->work);
}