/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kthread.h>

#include "i915_drv.h"
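/* Breadcrumbs track tasks waiting upon a seqno ("breadcrumb") being written
 * by the GPU. Waiters are kept in a per-engine, seqno-ordered rbtree; only
 * the oldest waiter (the "bottom-half", published via b->irq_seqno_bh) is
 * woken directly from the user interrupt, and is then responsible for waking
 * any other waiters whose seqno has also passed. Requests that require
 * dma_fence_signal() are handed to a dedicated signaler kthread. Two timers
 * back up the interrupt: fake_irq polls on behalf of the oldest waiter when
 * interrupts are unavailable or known to be unreliable, and hangcheck flags
 * the engine in missed_irq_rings when an expected interrupt never arrives.
 */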
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!b->irq_enabled)
                return;

        if (time_before(jiffies, b->timeout)) {
                mod_timer(&b->hangcheck, b->timeout);
                return;
        }

        DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
        set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
        mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

        /* Ensure that even if the GPU hangs, we get woken up.
         *
         * However, note that if no one is waiting, we never notice
         * a gpu hang. Eventually, we will have to wait for a resource
         * held by the GPU and so trigger a hangcheck. In the most
         * pathological case, this will be upon memory starvation! To
         * prevent this, we also queue the hangcheck from the retire
         * worker.
         */
        i915_queue_hangcheck(engine->i915);
}
static unsigned long wait_timeout(void)
{
        return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

        /*
         * The timer persists in case we cannot enable interrupts,
         * or if we have previously seen seqno/interrupt incoherency
         * ("missed interrupt" syndrome). Here the worker will wake up
         * every jiffie in order to kick the oldest waiter to do the
         * coherent seqno check.
         */
        if (intel_engine_wakeup(engine))
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}
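/* irq_posted is set immediately before the user interrupt is unmasked and
 * cleared again once it is masked. Waiters that observe it set (see the
 * READ_ONCE(b->irq_posted) checks below) are woken to perform a coherent
 * seqno check, in case the interrupt itself was generated before we were
 * listening for it.
 */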
static void irq_enable(struct intel_engine_cs *engine)
{
        /* Enabling the IRQ may miss the generation of the interrupt, but
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
        engine->breadcrumbs.irq_posted = true;

        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock(&engine->i915->irq_lock);

        engine->breadcrumbs.irq_posted = false;
}
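/* Arm the interrupt (and its backstops) on behalf of the current waiters.
 * If interrupts are unavailable, or this engine has previously missed an
 * interrupt (missed_irq_rings), the fake_irq timer re-arms itself every
 * jiffy to poll for the waiters instead; otherwise the hangcheck timer is
 * armed to detect the expected user interrupt going missing.
 */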
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
        struct drm_i915_private *i915 = engine->i915;

        assert_spin_locked(&b->lock);
        if (b->rpm_wakelock)
                return;

        /* Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. For completeness,
         * record an rpm reference for ourselves to cover the
         * interrupt we unmask.
         */
        intel_runtime_pm_get_noresume(i915);
        b->rpm_wakelock = true;

        /* No interrupts? Kick the waiter every jiffie! */
        if (intel_irqs_enabled(i915)) {
                if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        irq_enable(engine);
                b->irq_enabled = true;
        }

        if (!b->irq_enabled ||
            test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
                mod_timer(&b->fake_irq, jiffies + 1);
        } else {
                /* Ensure we never sleep indefinitely */
                GEM_BUG_ON(!time_after(b->timeout, jiffies));
                mod_timer(&b->hangcheck, b->timeout);
        }
}
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        assert_spin_locked(&b->lock);
        if (!b->rpm_wakelock)
                return;

        if (b->irq_enabled) {
                irq_disable(engine);
                b->irq_enabled = false;
        }

        intel_runtime_pm_put(engine->i915);
        b->rpm_wakelock = false;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
        return rb_entry(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
{
        assert_spin_locked(&b->lock);

        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task.
         */
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

        wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
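/* Waiters are ordered by seqno in b->waiters, with ties broken by task
 * priority so that the highest priority task serves as bottom-half for its
 * seqno. b->first_wait always tracks the leftmost (oldest) waiter, and its
 * task is published through b->irq_seqno_bh for the interrupt handler to
 * wake. The return value tells the caller whether it must perform its own
 * coherent seqno check: either because it is now the bottom-half, or because
 * its seqno had already passed before it could be inserted.
 */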
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node **p, *parent, *completed;
        bool first;
        u32 seqno;

        /* Insert the request into the retirement ordered list
         * of waiters by walking the rbtree. If we are the oldest
         * seqno in the tree (the first to be retired), then
         * set ourselves as the bottom-half.
         *
         * As we descend the tree, prune completed branches: since we hold
         * the spinlock we know that the first_waiter must be delayed, and
         * we can reduce some of the sequential wake up latency if we take
         * action ourselves and wake up the completed tasks in parallel.
         * Also, by removing stale elements in the tree, we may be able to
         * reduce the ping-pong between the old bottom-half and ourselves
         * as first-waiter.
         */
        first = true;
        parent = NULL;
        completed = NULL;
        seqno = intel_engine_get_seqno(engine);

        /* If the request completed before we managed to grab the spinlock,
         * return now before adding ourselves to the rbtree. We let the
         * current bottom-half handle any pending wakeups and instead
         * try and get out of the way quickly.
         */
        if (i915_seqno_passed(seqno, wait->seqno)) {
                RB_CLEAR_NODE(&wait->node);
                return first;
        }

        p = &b->waiters.rb_node;
        while (*p) {
                parent = *p;
                if (wait->seqno == to_wait(parent)->seqno) {
                        /* We have multiple waiters on the same seqno, select
                         * the highest priority task (that with the smallest
                         * task->prio) to serve as the bottom-half for this
                         * group.
                         */
                        if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                } else if (i915_seqno_passed(wait->seqno,
                                             to_wait(parent)->seqno)) {
                        p = &parent->rb_right;
                        if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
                                completed = parent;
                        else
                                first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);
        GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));

        if (completed) {
                struct rb_node *next = rb_next(completed);

                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
                         * and so need the next bottom-half to wake up.
                         *
                         * Also as we enable the IRQ, we may miss the
                         * interrupt for that seqno, so we have to wake up
                         * the next bottom-half in order to do a coherent check
                         * in case the seqno passed.
                         */
                        __intel_breadcrumbs_enable_irq(b);
                        if (READ_ONCE(b->irq_posted))
                                wake_up_process(to_wait(next)->tsk);
                }

                do {
                        struct intel_wait *crumb = to_wait(completed);
                        completed = rb_prev(completed);
                        __intel_breadcrumbs_finish(b, crumb);
                } while (completed);
        }

        if (first) {
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
                b->timeout = wait_timeout();
                b->first_wait = wait;
                rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or if there was a previous waiter (for a later seqno) they
                 * may be woken instead of us (due to the inherent race
                 * in the unlocked read of b->irq_seqno_bh in the irq handler)
                 * and so we miss the wake up.
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

        return first;
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool first;

        spin_lock_irq(&b->lock);
        first = __intel_engine_add_wait(engine, wait);
        spin_unlock_irq(&b->lock);

        return first;
}
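/* Typical shape of a waiter using the interface above (an illustrative
 * sketch only, with error and timeout handling omitted; "seqno" and the
 * bare wait loop stand in for whatever the real caller uses):
 *
 *      struct intel_wait wait = { .tsk = current, .seqno = seqno };
 *
 *      if (intel_engine_add_wait(engine, &wait))
 *              ;       // we are the bottom-half and must do our own
 *                      // coherent seqno check before sleeping
 *      for (;;) {
 *              set_current_state(TASK_UNINTERRUPTIBLE);
 *              if (i915_seqno_passed(intel_engine_get_seqno(engine),
 *                                    wait.seqno))
 *                      break;
 *              schedule();
 *      }
 *      __set_current_state(TASK_RUNNING);
 *      intel_engine_remove_wait(engine, &wait);
 */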
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
        return rb && to_wait(rb)->tsk->prio <= priority;
}
static inline int wakeup_priority(struct intel_breadcrumbs *b,
                                  struct task_struct *tsk)
{
        if (tsk == b->signaler)
                return INT_MIN;
        else
                return tsk->prio;
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* Quick check to see if this waiter was already decoupled from
         * the tree by the bottom-half to avoid contention on the spinlock
         * by the herd.
         */
        if (RB_EMPTY_NODE(&wait->node))
                return;

        spin_lock_irq(&b->lock);

        if (RB_EMPTY_NODE(&wait->node))
                goto out_unlock;

        if (b->first_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;

                GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                 * takes us to wake up and find the next waiter, we have to
                 * wake up that waiter for it to perform its own coherent
                 * completion check.
                 */
                next = rb_next(&wait->node);
                if (chain_wakeup(next, priority)) {
                        /* If the next waiter is already complete,
                         * wake it up and continue onto the next waiter. So
                         * if we have a small herd, they will wake up in
                         * parallel rather than sequentially, which should
                         * reduce the overall latency in waking all the
                         * completed clients.
                         *
                         * However, waking up a chain adds extra latency to
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
                        u32 seqno = intel_engine_get_seqno(engine);

                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);

                                __intel_breadcrumbs_finish(b, to_wait(next));
                                next = n;
                                if (!chain_wakeup(next, priority))
                                        break;
                        }
                }

                if (next) {
                        /* In our haste, we may have completed the first waiter
                         * before we enabled the interrupt. Do so now as we
                         * have a second waiter for a future seqno. Afterwards,
                         * we have to wake up that waiter in case we missed
                         * the interrupt, or if we have to handle an
                         * exception rather than a seqno completion.
                         */
                        b->timeout = wait_timeout();
                        b->first_wait = to_wait(next);
                        rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
                        wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
                        rcu_assign_pointer(b->irq_seqno_bh, NULL);
                        __intel_breadcrumbs_disable_irq(b);
                }
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }

        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);

out_unlock:
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
        GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
        spin_unlock_irq(&b->lock);
}
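/* Signaling: a request that needs dma_fence_signal() registers the signaler
 * kthread as the waiter for its seqno and is also inserted into the
 * seqno-ordered b->signals tree. The kthread below walks that tree oldest
 * first, signalling each completed fence and dropping the reference taken
 * in intel_engine_enable_signaling().
 */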
static bool signal_complete(struct drm_i915_gem_request *request)
{
        if (!request)
                return false;

        /* If another process served as the bottom-half it may have already
         * signalled that this wait is already completed.
         */
        if (intel_wait_complete(&request->signaling.wait))
                return true;

        /* Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
        if (__i915_request_irq_complete(request))
                return true;

        return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
        return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}
static void signaler_set_rtpriority(void)
{
        struct sched_param param = { .sched_priority = 1 };

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
static int intel_breadcrumbs_signaler(void *arg)
{
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_gem_request *request;

        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                /* We are either woken up by the interrupt bottom-half,
                 * or by a client adding a new signaller. In both cases,
                 * the GPU seqno may have advanced beyond our oldest signal.
                 * If it has, propagate the signal, remove the waiter and
                 * check again with the next oldest signal. Otherwise we
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
                request = READ_ONCE(b->first_signal);
                if (signal_complete(request)) {
                        /* Wake up all other completed waiters and select the
                         * next bottom-half for the next user interrupt.
                         */
                        intel_engine_remove_wait(engine,
                                                 &request->signaling.wait);

                        local_bh_disable();
                        dma_fence_signal(&request->fence);
                        local_bh_enable(); /* kick start the tasklets */

                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
                        spin_lock_irq(&b->lock);
                        if (request == b->first_signal) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
                                b->first_signal = rb ? to_signaler(rb) : NULL;
                        }
                        rb_erase(&request->signaling.node, &b->signals);
                        spin_unlock_irq(&b->lock);

                        i915_gem_request_put(request);
                } else {
                        if (kthread_should_stop())
                                break;

                        schedule();
                }
        } while (1);
        __set_current_state(TASK_RUNNING);

        return 0;
}
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        bool first, wakeup;

        /* Note that we may be called from an interrupt handler on another
         * device (e.g. nouveau signaling a fence completion causing us
         * to submit a request, and so enable signaling). As such,
         * we need to make sure that all other users of b->lock protect
         * against interrupts, i.e. use spin_lock_irqsave.
         */

        /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
        assert_spin_locked(&request->lock);
        if (!request->global_seqno)
                return;

        request->signaling.wait.tsk = b->signaler;
        request->signaling.wait.seqno = request->global_seqno;
        i915_gem_request_get(request);

        spin_lock(&b->lock);

        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
         * waiter (not just signaller) is tasked as the bottom-half waking
         * up all completed waiters after the user interrupt.
         *
         * If we are the oldest waiter, enable the irq (after which we
         * must double check that the seqno did not complete).
         */
        wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

        /* Now insert ourselves into the retirement ordered list of signals
         * on this engine. We track the oldest seqno as that will be the
         * first signal to complete.
         */
        parent = NULL;
        first = true;
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
                if (i915_seqno_passed(request->global_seqno,
                                      to_signaler(parent)->global_seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&request->signaling.node, parent, p);
        rb_insert_color(&request->signaling.node, &b->signals);
        if (first)
                smp_store_mb(b->first_signal, request);

        spin_unlock(&b->lock);

        if (wakeup)
                wake_up_process(b->signaler);
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;

        spin_lock_init(&b->lock);
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
        setup_timer(&b->hangcheck,
                    intel_breadcrumbs_hangcheck,
                    (unsigned long)engine);

        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
         * task for handling the bottom-half to the user interrupt, therefore
         * we create a thread to do the coherent seqno dance after the
         * interrupt and then signal the waitqueue (via the dma-buf/fence).
         */
        tsk = kthread_run(intel_breadcrumbs_signaler, engine,
                          "i915/signal:%d", engine->id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        b->signaler = tsk;

        return 0;
}
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        cancel_fake_irq(engine);
        spin_lock_irq(&b->lock);

        __intel_breadcrumbs_disable_irq(b);
        if (intel_engine_has_waiter(engine)) {
                b->timeout = wait_timeout();
                __intel_breadcrumbs_enable_irq(b);
                if (READ_ONCE(b->irq_posted))
                        wake_up_process(b->first_wait->tsk);
        } else {
                /* sanitize the IMR and unmask any auxiliary interrupts */
                irq_disable(engine);
        }

        spin_unlock_irq(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* The engines should be idle and all requests accounted for! */
        WARN_ON(READ_ONCE(b->first_wait));
        WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
        WARN_ON(READ_ONCE(b->first_signal));
        WARN_ON(!RB_EMPTY_ROOT(&b->signals));

        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);

        cancel_fake_irq(engine);
}
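/* Report which engines still have waiters or pending signals, waking each
 * of them so that their state is re-checked against the current seqno.
 */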
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int mask = 0;

        for_each_engine(engine, i915, id) {
                struct intel_breadcrumbs *b = &engine->breadcrumbs;

                spin_lock_irq(&b->lock);

                if (b->first_wait) {
                        wake_up_process(b->first_wait->tsk);
                        mask |= intel_engine_flag(engine);
                }

                if (b->first_signal) {
                        wake_up_process(b->signaler);
                        mask |= intel_engine_flag(engine);
                }

                spin_unlock_irq(&b->lock);
        }

        return mask;
}