/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"
static bool retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			return false;

	/* And check nothing new was submitted */
	return !i915_active_fence_isset(&tl->last_request);
}
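/*
 * An engine is considered active while its kernel context timeline still
 * has requests pending, i.e. its idle barriers have not yet been retired.
 */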
static bool engine_active(const struct intel_engine_cs *engine)
{
	return !list_empty(&engine->kernel_context->timeline->requests);
}
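/*
 * Flush each engine's submission tasklet and background workers, then report
 * whether any engine still has requests outstanding on its kernel context
 * timeline (i.e. an idle barrier is still in flight).
 */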
static bool flush_submission(struct intel_gt *gt, long timeout)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool active = false;

	if (!timeout)
		return false;

	if (!intel_gt_pm_is_awake(gt))
		return false;

	for_each_engine(engine, gt, id) {
		intel_engine_flush_submission(engine);

		/* Flush the background retirement and idle barriers */
		flush_work(&engine->retire_work);
		flush_delayed_work(&engine->wakeref.work);

		/* Is the idle barrier still outstanding? */
		active |= engine_active(engine);
	}

	return active;
}
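/*
 * engine->retire_work handler: atomically take ownership of the chain of
 * timelines queued on engine->retire and retire each one in turn.
 */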
static void engine_retire(struct work_struct *work)
{
	struct intel_engine_cs *engine =
		container_of(work, typeof(*engine), retire_work);
	struct intel_timeline *tl = xchg(&engine->retire, NULL);

	do {
		struct intel_timeline *next = xchg(&tl->retire, NULL);

		/*
		 * Our goal here is to retire _idle_ timelines as soon as
		 * possible (as they are idle, we do not expect userspace
		 * to be cleaning up anytime soon).
		 *
		 * If the timeline is currently locked, either it is being
		 * retired elsewhere or about to be!
		 */
		if (mutex_trylock(&tl->mutex)) {
			retire_requests(tl);
			mutex_unlock(&tl->mutex);
		}
		intel_timeline_put(tl);

		GEM_BUG_ON(!next);
		tl = ptr_mask_bits(next, 1);
	} while (tl);
}
static bool add_retire(struct intel_engine_cs *engine,
		       struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
	struct intel_timeline *first;

	/*
	 * We open-code a llist here to include the additional tag [BIT(0)]
	 * so that we know when the timeline is already on a
	 * retirement queue: either this engine or another.
	 */

	if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
		return false;

	intel_timeline_get(tl);
	first = READ_ONCE(engine->retire);
	do
		tl->retire = ptr_pack_bits(first, 1, 1);
	while (!try_cmpxchg(&engine->retire, &first, tl));

	return !first;
}
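/*
 * add_retire() returns true only when the timeline was added to an empty
 * engine->retire list, i.e. when the caller needs to kick the retire worker.
 */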
void intel_engine_add_retire(struct intel_engine_cs *engine,
			     struct intel_timeline *tl)
{
	/* We don't deal well with the engine disappearing beneath us */
	GEM_BUG_ON(intel_engine_is_virtual(engine));

	if (add_retire(engine, tl))
		schedule_work(&engine->retire_work);
}
void intel_engine_init_retire(struct intel_engine_cs *engine)
{
	INIT_WORK(&engine->retire_work, engine_retire);
}
void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
	flush_work(&engine->retire_work);
	GEM_BUG_ON(engine->retire);
}
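/*
 * Retire completed requests across all active timelines. A positive timeout
 * allows waiting (interruptibly) for the last request on each timeline; a
 * negative timeout waits uninterruptibly for its absolute value. Returns the
 * remaining timeout while work is still outstanding, or 0 once everything
 * has been retired.
 */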
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	bool interruptible;
	LIST_HEAD(free);

	interruptible = true;
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {
			active_count++; /* report busy to caller, try again? */
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				mutex_unlock(&tl->mutex);

				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);

				/* Retirement is best effort */
				if (!mutex_trylock(&tl->mutex)) {
					active_count++;
					goto out_active;
				}
			}
		}

		if (!retire_requests(tl))
			active_count++;
		mutex_unlock(&tl->mutex);

out_active:	spin_lock(&timelines->lock);

		/* Resume list iteration after reacquiring spinlock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	if (flush_submission(gt, timeout)) /* Wait, there's more! */
		active_count++;

	return active_count ? timeout : 0;
}
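/*
 * Repeatedly retire requests until the GT is idle, rescheduling between
 * passes and bailing out with -EINTR if the caller receives a signal.
 */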
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout;
}
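/*
 * Periodic retirement: the delayed work rearms itself roughly once a second
 * (round_jiffies_up_relative(HZ)) before retiring whatever has completed.
 */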
static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
	intel_gt_retire_requests(gt);
}
void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}
void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}
void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}
void intel_gt_fini_requests(struct intel_gt *gt)
{
	/* Wait until the work is marked as finished before unloading! */
	cancel_delayed_work_sync(&gt->requests.retire_work);
}