/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

static bool retire_requests(struct intel_timeline *tl)
{
        struct i915_request *rq, *rn;

        list_for_each_entry_safe(rq, rn, &tl->requests, link)
                if (!i915_request_retire(rq))
                        return false;

        /* And check nothing new was submitted */
        return !i915_active_fence_isset(&tl->last_request);
}
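
/*
 * Flush the submission tasklets and the per-engine retire/parking workers
 * so that requests completed by the hardware become visible to the
 * retirement lists below. Returns true if any deferred work was still
 * pending, i.e. the caller should not yet assume the GT is idle.
 */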
static bool flush_submission(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool active = false;

        if (!intel_gt_pm_is_awake(gt))
                return false;

        for_each_engine(engine, gt, id) {
                intel_engine_flush_submission(engine);
                active |= flush_work(&engine->retire_work);
                active |= flush_work(&engine->wakeref.work);
        }

        return active;
}

static void engine_retire(struct work_struct *work)
{
        struct intel_engine_cs *engine =
                container_of(work, typeof(*engine), retire_work);
        struct intel_timeline *tl = xchg(&engine->retire, NULL);

        do {
                struct intel_timeline *next = xchg(&tl->retire, NULL);

                /*
                 * Our goal here is to retire _idle_ timelines as soon as
                 * possible (as they are idle, we do not expect userspace
                 * to be cleaning up anytime soon).
                 *
                 * If the timeline is currently locked, either it is being
                 * retired elsewhere or about to be!
                 */
                if (mutex_trylock(&tl->mutex)) {
                        retire_requests(tl);
                        mutex_unlock(&tl->mutex);
                }
                intel_timeline_put(tl);

                GEM_BUG_ON(!next);
                tl = ptr_mask_bits(next, 1);
        } while (tl);
}
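
/*
 * engine->retire is a lock-free, singly linked list of timelines awaiting
 * retirement, chained through tl->retire. The low bit of each link acts as
 * a tag: the last element stores ptr_pack_bits(NULL, 1, 1) == (void *)1
 * rather than NULL, so a timeline that is already queued (tl->retire set)
 * can be told apart from an idle one. engine_retire() strips the tag again
 * with ptr_mask_bits() when walking the list.
 */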
static bool add_retire(struct intel_engine_cs *engine,
                       struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
        struct intel_timeline *first;

        /*
         * We open-code a llist here to include the additional tag [BIT(0)]
         * so that we know when the timeline is already on a
         * retirement queue: either this engine or another.
         */

        if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
                return false;

        intel_timeline_get(tl);
        first = READ_ONCE(engine->retire);
        do
                tl->retire = ptr_pack_bits(first, 1, 1);
        while (!try_cmpxchg(&engine->retire, &first, tl));
#undef STUB

        return !first;
}
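
/*
 * Queue a timeline onto the engine's retirement list; the worker is only
 * kicked when the list transitions from empty (add_retire() returns
 * !first), so repeated calls for an already busy engine stay cheap.
 */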
void intel_engine_add_retire(struct intel_engine_cs *engine,
                             struct intel_timeline *tl)
{
        if (add_retire(engine, tl))
                schedule_work(&engine->retire_work);
}

void intel_engine_init_retire(struct intel_engine_cs *engine)
{
        INIT_WORK(&engine->retire_work, engine_retire);
}

void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
        flush_work(&engine->retire_work);
        GEM_BUG_ON(engine->retire);
}
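
/*
 * Retire completed requests on every active timeline, optionally waiting
 * up to @timeout jiffies for the last request on each timeline. By
 * convention a negative @timeout requests an uninterruptible wait of
 * -@timeout jiffies. Returns the remaining timeout while timelines are
 * still active, or 0 once everything has been retired.
 */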
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
        struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl, *tn;
        unsigned long active_count = 0;
        bool interruptible;
        LIST_HEAD(free);

        interruptible = true;
        if (unlikely(timeout < 0))
                timeout = -timeout, interruptible = false;

        flush_submission(gt); /* kick the ksoftirqd tasklets */
        spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex)) {
                        active_count++; /* report busy to caller, try again? */
                        continue;
                }

                intel_timeline_get(tl);
                GEM_BUG_ON(!atomic_read(&tl->active_count));
                atomic_inc(&tl->active_count); /* pin the list element */
                spin_unlock(&timelines->lock);

                if (timeout > 0) {
                        struct dma_fence *fence;

                        fence = i915_active_fence_get(&tl->last_request);
                        if (fence) {
                                timeout = dma_fence_wait_timeout(fence,
                                                                 interruptible,
                                                                 timeout);
                                dma_fence_put(fence);
                        }
                }

                if (!retire_requests(tl) || flush_submission(gt))
                        active_count++;

                spin_lock(&timelines->lock);

                /* Resume iteration after dropping lock */
                list_safe_reset_next(tl, tn, link);
                if (atomic_dec_and_test(&tl->active_count))
                        list_del(&tl->link);

                mutex_unlock(&tl->mutex);

                /* Defer the final release to after the spinlock */
                if (refcount_dec_and_test(&tl->kref.refcount)) {
                        GEM_BUG_ON(atomic_read(&tl->active_count));
                        list_add(&tl->link, &free);
                }
        }
        spin_unlock(&timelines->lock);

        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);

        return active_count ? timeout : 0;
}
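
/*
 * Wait for the GT to become idle: repeatedly retire requests until either
 * no active timelines remain, the timeout is exhausted, or the caller is
 * interrupted by a signal (-EINTR).
 */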
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
        /* If the device is asleep, we have no requests outstanding */
        if (!intel_gt_pm_is_awake(gt))
                return 0;

        while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
                cond_resched();
                if (signal_pending(current))
                        return -EINTR;
        }

        return timeout;
}
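
/*
 * Background housekeeping: while the GT is unparked, this delayed work
 * re-arms itself roughly once a second (round_jiffies_up_relative(HZ))
 * and opportunistically retires completed requests.
 */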
static void retire_work_handler(struct work_struct *work)
{
        struct intel_gt *gt =
                container_of(work, typeof(*gt), requests.retire_work.work);

        schedule_delayed_work(&gt->requests.retire_work,
                              round_jiffies_up_relative(HZ));
        intel_gt_retire_requests(gt);
}

void intel_gt_init_requests(struct intel_gt *gt)
{
        INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

void intel_gt_park_requests(struct intel_gt *gt)
{
        cancel_delayed_work(&gt->requests.retire_work);
}

void intel_gt_unpark_requests(struct intel_gt *gt)
{
        schedule_delayed_work(&gt->requests.retire_work,
                              round_jiffies_up_relative(HZ));
}

void intel_gt_fini_requests(struct intel_gt *gt)
{
        /* Wait until the work is marked as finished before unloading! */
        cancel_delayed_work_sync(&gt->requests.retire_work);
}