/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
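/*
 * Test fixture: a kref-counted wrapper around an i915_active, with a
 * flag recording whether the retire callback has run.
 */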
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};
static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}
static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}
static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}
static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}
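/*
 * __live_active and __live_retire are the acquire/retire callbacks wired
 * into the i915_active below: the first acquire takes a reference on the
 * fixture, and final retirement drops it again after marking it retired.
 */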
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}
static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(&active->base, __live_active, __live_retire);

	return active;
}
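/*
 * Submit a kernel request on each uabi engine, with every request gated on
 * an unsignaled heap fence so that nothing can retire prematurely, and
 * verify the i915_active is tracking all of them before letting them run.
 */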
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}
static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	i915_active_wait(&active->base);
	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after waiting!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}
static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915))
		err = -EIO;

	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(__func__);

		pr_err("i915_active not retired after flushing!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	return err;
}
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}
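/*
 * A node holding a barrier is marked as such in its fence slot; since
 * add_active_barriers() may claim the node concurrently, recheck after
 * the read barrier before trusting the engine we found.
 */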
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (!is_barrier(&it->base))
		return NULL;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		engine = NULL;

	return engine;
}
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
	drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   yesno(!llist_empty(&ref->preallocated_barriers)));

	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct intel_engine_cs *engine;

			engine = node_to_barrier(it);
			if (engine) {
				drm_printf(m, "\tbarrier: %s\n", engine->name);
				continue;
			}

			if (i915_active_fence_isset(&it->base)) {
				drm_printf(m,
					   "\ttimeline: %llx\n", it->timeline);
				continue;
			}
		}

		i915_active_release(ref);
	}
}
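/*
 * Wait until any critical section currently holding @lock has completed,
 * by briefly taking and releasing the lock ourselves.
 */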
static void spin_unlock_wait(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}
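/* Wait for all outstanding fence callbacks and the retire worker. */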
void i915_active_unlock_wait(struct i915_active *ref)
{
	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rcu_read_lock();
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct dma_fence *f;

			/* Wait for all active callbacks */
			f = rcu_dereference(it->base.fence);
			if (f)
				spin_unlock_wait(f->lock);
		}
		rcu_read_unlock();

		i915_active_release(ref);
	}

	/* And wait for the retire callback */
	spin_lock_irq(&ref->tree_lock);
	spin_unlock_irq(&ref->tree_lock);

	/* ... which may have been on a thread instead */
	flush_work(&ref->work);
}