/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>

#include "i915_drv.h"
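
/* Overview (summarising the code below): for each engine only one waiter,
 * the "bottom-half", has the user interrupt enabled and performs the
 * coherent seqno check after each interrupt; it then wakes any other
 * waiters whose seqno has passed. A dedicated signaler kthread acts as the
 * waiter for dma-buf/fence signaling, and a "fake irq" timer kicks the
 * oldest waiter every jiffie when interrupts are unavailable or have been
 * seen to go missing.
 */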
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */
	if (intel_engine_wakeup(engine))
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}
static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	engine->breadcrumbs.irq_posted = true;

	/* Make sure the current hangcheck doesn't falsely accuse a just
	 * started irq handler from missing an interrupt (because the
	 * interrupt count still matches the stale value from when
	 * the irq handler was disabled, many hangchecks ago).
	 */
	engine->breadcrumbs.irq_wakeups++;

	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);

	engine->breadcrumbs.irq_posted = false;
}
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->rpm_wakelock)
		return;

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. For completeness,
	 * record an rpm reference for ourselves to cover the
	 * interrupt we unmask.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
		mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation!
	 */
	i915_queue_hangcheck(i915);
}
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	assert_spin_locked(&b->lock);
	if (!b->rpm_wakelock)
		return;

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	intel_runtime_pm_put(engine->i915);
	b->rpm_wakelock = false;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return container_of(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed, and
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	GEM_BUG_ON(!first && !b->irq_seqno_bh);

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so need for the next bottom-half to wakeup.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (READ_ONCE(b->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->first_wait = wait;
		smp_store_mb(b->irq_seqno_bh, wait->tsk);
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!b->irq_seqno_bh);
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}
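
/* Add a waiter for the given seqno to the engine's rbtree of waiters and, if
 * it becomes the oldest waiter (or its seqno has already passed), return true
 * so that the caller performs an immediate coherent seqno check as the new
 * bottom-half.
 */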
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->lock);

	return first;
}
void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
{
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}
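
/* When the departing waiter was the bottom-half, it may also wake a chain of
 * adjacent, already-completed waiters on its way out. chain_wakeup() allows
 * this only while the next waiter's prio value does not exceed the given
 * threshold, and wakeup_priority() sets that threshold to the departing
 * task's own prio (a smaller value means a more important task), or to
 * INT_MIN for the signaler thread so that it never spends time waking a
 * chain itself.
 */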
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}
static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->irq_seqno_bh);
		} else {
			b->first_wait = NULL;
			WRITE_ONCE(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock(&b->lock);
}
static bool signal_complete(struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return true;

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return container_of(rb, struct drm_i915_gem_request, signaling.node);
}
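
/* Give the signaler realtime priority (SCHED_FIFO, priority 1) so that the
 * latency between the user interrupt and the fence being signalled is kept
 * to a minimum.
 */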
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);
			fence_signal(&request->fence);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			spin_lock(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock(&b->lock);

			i915_gem_request_put(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	/* locked by fence_enable_sw_signaling() */
	assert_spin_locked(&request->lock);

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->fence.seqno;
	i915_gem_request_get(request);

	spin_lock(&b->lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->fence.seqno,
				      to_signaler(parent)->fence.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		smp_store_mb(b->first_signal, request);

	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	del_timer_sync(&b->fake_irq);
}
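
/* Wake the bottom-half waiter on every engine (if there is one) and return a
 * mask of the engines that had a waiter to kick.
 */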
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	/* To avoid the task_struct disappearing beneath us as we wake up
	 * the process, we must first inspect the task_struct->state under the
	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
	 * rcu_read_lock().
	 */
	rcu_read_lock();
	for_each_engine(engine, i915)
		if (unlikely(intel_engine_wakeup(engine)))
			mask |= intel_engine_flag(engine);
	rcu_read_unlock();

	return mask;
}
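
/* Kick the signaler kthread on every engine that still has a pending signal
 * and return a mask of the engines that were kicked.
 */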
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	for_each_engine(engine, i915) {
		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
			wake_up_process(engine->breadcrumbs.signaler);
			mask |= intel_engine_flag(engine);
		}
	}

	return mask;
}