/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
 *
 * Adaptive Spinlocks simplification:
 * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
 *
 * See Documentation/rt-mutex-design.txt for details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/hardirq.h>
#include <linux/semaphore.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner        bit1    bit0
 * NULL         0       0       lock is free (fast acquire possible)
 * NULL         0       1       invalid state
 * NULL         1       0       Transitional State*
 * NULL         1       1       invalid state
 * taskpointer  0       0       lock is held (fast release possible)
 * taskpointer  0       1       task is pending owner
 * taskpointer  1       0       lock is held and has waiters
 * taskpointer  1       1       task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock
 * which puts the woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 and 1 of lock->owner are 0.
 *
 * (*) There's a small time where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set this
 * bit before looking at the lock, hence the reason this is a transitional
 * state.
 */

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
                   unsigned long mask)
{
        unsigned long val = (unsigned long)owner | mask;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}
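
/*
 * Illustration only (not part of the original comments): the two low bits
 * live inside the owner pointer itself, so readers must mask them off
 * before dereferencing.  The helpers in rtmutex_common.h do roughly the
 * following; the exact mask names are assumed from that header:
 *
 *      #define RT_MUTEX_OWNER_PENDING  1UL
 *      #define RT_MUTEX_HAS_WAITERS    2UL
 *      #define RT_MUTEX_OWNER_MASKALL  3UL
 *
 *      owner = (struct task_struct *)
 *                      ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
 *      pending = (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
 */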

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

/*
 * We initialize the wait_list runtime. (Could be done build-time and/or
 * boot-time.)
 */
static inline void init_lists(struct rt_mutex *lock)
{
        if (unlikely(!lock->wait_list.prio_list.prev)) {
                plist_head_init(&lock->wait_list, &lock->wait_lock);
#ifdef CONFIG_DEBUG_RT_MUTEXES
                pi_initialized++;
#endif
        }
}

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold a
         * maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * Task can not go away as we did a get_task() before !
         */
        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter || !waiter->task)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock
         * and made us the pending owner:
         */
        if (orig_waiter && !orig_waiter->task)
                goto out_unlock_pi;

        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
         * mode!
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off then we check, if further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!spin_trylock(&lock->wait_lock)) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        spin_unlock(&task->pi_lock);
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        spin_lock(&task->pi_lock);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        spin_unlock(&task->pi_lock);

        top_waiter = rt_mutex_top_waiter(lock);
        spin_unlock_irqrestore(&lock->wait_lock, flags);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
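
/*
 * Worked example of the chain walk above (illustrative only): task A
 * (prio 10) blocks on lock L1 owned by B (prio 30), while B is itself
 * blocked on L2 owned by C (prio 40).  The walk started on behalf of A
 * requeues A on L1 with prio 10 and boosts B to prio 10; because B has
 * pi_blocked_on set, the next iteration follows it to L2, requeues B
 * there and boosts C to prio 10 as well.  Each step holds at most one
 * task->pi_lock plus one lock->wait_lock, and the walk gives up after
 * max_lock_depth iterations.
 */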

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock,
                                    struct task_struct *task, int mode)
{
        struct task_struct *pendowner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *next;

        if (!rt_mutex_owner_pending(lock))
                return 0;

        if (pendowner == task)
                return 1;

        spin_lock(&pendowner->pi_lock);
        if (!lock_is_stealable(task, pendowner, mode)) {
                spin_unlock(&pendowner->pi_lock);
                return 0;
        }

        /*
         * Check if a waiter is enqueued on the pending owners
         * pi_waiters list. Remove it and readjust pending owners
         * priority.
         */
        if (likely(!rt_mutex_has_waiters(lock))) {
                spin_unlock(&pendowner->pi_lock);
                return 1;
        }

        /* No chain handling, pending owner is not blocked on anything: */
        next = rt_mutex_top_waiter(lock);
        plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
        __rt_mutex_adjust_prio(pendowner);
        spin_unlock(&pendowner->pi_lock);

        /*
         * We are going to steal the lock and a waiter was
         * enqueued on the pending owners pi_waiters queue. So
         * we have to enqueue this waiter into
         * task->pi_waiters list. This covers the case,
         * where task is boosted because it holds another
         * lock and gets unboosted because the booster is
         * interrupted, so we would delay a waiter with higher
         * priority as task->normal_prio.
         *
         * Note: in the rare case of a SCHED_OTHER task changing
         * its priority and thus stealing the lock, next->task
         * might be task:
         */
        if (likely(next->task != task)) {
                spin_lock(&task->pi_lock);
                plist_add(&next->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
                spin_unlock(&task->pi_lock);
        }
        return 1;
}

/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int do_try_to_take_rt_mutex(struct rt_mutex *lock, int mode)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled, such that, when
         *  - no other waiter is on the lock
         *  - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note, that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
         * any more. This is fixed up when we take the ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current, mode))
                return 0;

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, current, 0);

        rt_mutex_deadlock_account_lock(lock, current);

        return 1;
}

static inline int try_to_take_rt_mutex(struct rt_mutex *lock)
{
        return do_try_to_take_rt_mutex(lock, STEAL_NORMAL);
}
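
/*
 * Note on the two steal modes (the STEAL_NORMAL/STEAL_LATERAL constants and
 * lock_is_stealable() are defined elsewhere in the -rt tree; their exact
 * definitions are assumed here): STEAL_NORMAL is used for sleeping locks,
 * where only a strictly higher priority task may steal the lock from the
 * pending owner, while STEAL_LATERAL is used by the spinlock-style slow
 * path below and is meant to also allow a task of equal priority to take
 * the lock.
 */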

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock, unsigned long flags)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        int chain_walk = 0, res;

        spin_lock(&task->pi_lock);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, task->prio);
        plist_node_init(&waiter->pi_list_entry, task->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        task->pi_blocked_on = waiter;

        spin_unlock(&task->pi_lock);

        if (waiter == rt_mutex_top_waiter(lock)) {
                spin_lock(&owner->pi_lock);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                spin_unlock(&owner->pi_lock);
        }
        else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         task);

        spin_lock_irq(&lock->wait_lock);

        return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock, int savestate)
{
        struct rt_mutex_waiter *waiter;
        struct task_struct *pendowner;
        struct rt_mutex_waiter *next;

        spin_lock(&current->pi_lock);

        waiter = rt_mutex_top_waiter(lock);
        plist_del(&waiter->list_entry, &lock->wait_list);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);
        pendowner = waiter->task;
        waiter->task = NULL;

        /*
         * Do the wakeup before the ownership change to give any spinning
         * waiter grantees a headstart over the other threads that will
         * trigger once owner changes.
         */
        if (!savestate)
                wake_up_process(pendowner);
        else {
                /*
                 * We can skip the actual (expensive) wakeup if the
                 * waiter is already running, but we have to be careful
                 * of race conditions because they may be about to sleep.
                 *
                 * The waiter-side protocol has the following pattern:
                 * 1: Set state != RUNNING
                 * 2: Conditionally sleep if waiter->task != NULL;
                 *
                 * And the owner-side has the following:
                 * A: Set waiter->task = NULL
                 * B: Conditionally wake if the state != RUNNING
                 *
                 * As long as we ensure 1->2 order, and A->B order, we
                 * will never miss a wakeup.
                 *
                 * Therefore, this barrier ensures that waiter->task = NULL
                 * is visible before we test the pendowner->state. The
                 * corresponding barrier is in the sleep logic.
                 */
                smp_mb();

                /* If !RUNNING && !RUNNING_MUTEX */
                if (pendowner->state & ~TASK_RUNNING_MUTEX)
                        wake_up_process_mutex(pendowner);
        }

        rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

        spin_unlock(&current->pi_lock);

        /*
         * Clear the pi_blocked_on variable and enqueue a possible
         * waiter into the pi_waiters list of the pending owner. This
         * prevents that in case the pending owner gets unboosted a
         * waiter with higher priority than pending-owner->normal_prio
         * is blocked on the unboosted (pending) owner.
         */

        if (rt_mutex_has_waiters(lock))
                next = rt_mutex_top_waiter(lock);
        else
                next = NULL;

        spin_lock(&pendowner->pi_lock);

        WARN_ON(!pendowner->pi_blocked_on);
        WARN_ON(pendowner->pi_blocked_on != waiter);
        WARN_ON(pendowner->pi_blocked_on->lock != lock);

        pendowner->pi_blocked_on = NULL;

        if (next)
                plist_add(&next->pi_list_entry, &pendowner->pi_waiters);

        spin_unlock(&pendowner->pi_lock);
}
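
/*
 * Schematic of the handshake documented above (illustration only, not part
 * of the original comment):
 *
 *      waiter side (slow lock path)    owner side (wakeup_next_waiter)
 *      ----------------------------    -------------------------------
 *      1: set state != TASK_RUNNING    A: waiter->task = NULL
 *      2: sleep only if waiter->task   B: smp_mb(), then wake the waiter
 *         is still set                    only if its state != RUNNING
 *
 * Whichever side runs second observes the other side's store, so either
 * the waiter sees waiter->task == NULL and never sleeps, or the owner sees
 * the non-running state and issues the wakeup.
 */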

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter,
                          unsigned long flags)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        int chain_walk = 0;

        spin_lock(&current->pi_lock);
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->task = NULL;
        current->pi_blocked_on = NULL;
        spin_unlock(&current->pi_lock);

        if (first && owner != current) {

                spin_lock(&owner->pi_lock);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                spin_unlock(&owner->pi_lock);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!chain_walk)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        spin_lock_irq(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);

        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/*
 * preemptible spin_lock functions:
 */

#ifdef CONFIG_PREEMPT_RT

static inline void
rt_spin_lock_fastlock(struct rt_mutex *lock,
                      void (*slowfn)(struct rt_mutex *lock))
{
        /* Temporary HACK! */
        if (likely(!current->in_printk))
                might_sleep();
        else if (in_atomic() || irqs_disabled())
                /* don't grab locks for printk in atomic */
                return;

        if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
                rt_mutex_deadlock_account_lock(lock, current);
        else
                slowfn(lock);
}

static inline void
rt_spin_lock_fastunlock(struct rt_mutex *lock,
                        void (*slowfn)(struct rt_mutex *lock))
{
        /* Temporary HACK! */
        if (unlikely(rt_mutex_owner(lock) != current) && current->in_printk)
                /* don't grab locks for printk in atomic */
                return;

        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

#ifdef CONFIG_SMP
static int adaptive_wait(struct rt_mutex_waiter *waiter,
                         struct task_struct *orig_owner)
{
        for (;;) {
                /* we are the owner? */
                if (!waiter->task)
                        return 0;

                /* Owner changed? Then lets update the original */
                if (orig_owner != rt_mutex_owner(waiter->lock))
                        return 0;

                /* Owner went to bed, so should we */
                if (!task_is_current(orig_owner))
                        return 1;

                cpu_relax();
        }
}
#else
static int adaptive_wait(struct rt_mutex_waiter *waiter,
                         struct task_struct *orig_owner)
{
        return 1;
}
#endif

/*
 * The state setting needs to preserve the original state and needs to
 * take care of non rtmutex wakeups.
 *
 * Called with rtmutex->wait_lock held to serialize against rtmutex
 * wakeups.
 */
static inline unsigned long
rt_set_current_blocked_state(unsigned long saved_state)
{
        unsigned long state, block_state;

        /*
         * If state is TASK_INTERRUPTIBLE, then we set the state for
         * blocking to TASK_INTERRUPTIBLE as well, otherwise we would
         * miss real wakeups via wake_up_interruptible(). If such a
         * wakeup happens we see the running state and preserve it in
         * saved_state. Now we can ignore further wakeups as we will
         * return in state running from our "spin" sleep.
         */
        if (saved_state == TASK_INTERRUPTIBLE)
                block_state = TASK_INTERRUPTIBLE;
        else
                block_state = TASK_UNINTERRUPTIBLE;

        state = xchg(&current->state, block_state);
        /*
         * Take care of non rtmutex wakeups. rtmutex wakeups
         * or TASK_RUNNING_MUTEX to (UN)INTERRUPTIBLE.
         */
        if (state == TASK_RUNNING)
                saved_state = TASK_RUNNING;

        return saved_state;
}

static inline void rt_restore_current_state(unsigned long saved_state)
{
        unsigned long state = xchg(&current->state, saved_state);

        if (state == TASK_RUNNING)
                current->state = TASK_RUNNING;
}
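
/*
 * Example of why the xchg dance above matters (illustrative scenario, not
 * from the original file): a task sitting in wait_event_interruptible()
 * sets TASK_INTERRUPTIBLE and then takes a spinlock-turned-rtmutex on its
 * way to schedule().  While it "sleeps" on the lock a real wakeup arrives
 * and the next xchg() returns TASK_RUNNING, so saved_state is updated to
 * TASK_RUNNING.  When rt_restore_current_state() runs after the lock is
 * acquired, the task is left runnable and the outer wait loop sees its
 * wakeup instead of losing it.
 */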

/*
 * Slow path lock function spin_lock style: this variant is very
 * careful not to miss any non-lock wakeups.
 *
 * The wakeup side uses wake_up_process_mutex, which, combined with
 * the xchg code of this function is a transparent sleep/wakeup
 * mechanism nested within any existing sleep/wakeup mechanism. This
 * enables the seamless use of arbitrary (blocking) spinlocks within
 * sleep/wakeup event loops.
 */
static void noinline __sched
rt_spin_lock_slowlock(struct rt_mutex *lock)
{
        struct rt_mutex_waiter waiter;
        unsigned long saved_state, flags;
        struct task_struct *orig_owner;

        debug_rt_mutex_init_waiter(&waiter);
        waiter.task = NULL;

        spin_lock_irqsave(&lock->wait_lock, flags);
        init_lists(lock);

        BUG_ON(rt_mutex_owner(lock) == current);

        /*
         * Here we save whatever state the task was in originally,
         * we'll restore it at the end of the function and we'll take
         * any intermediate wakeup into account as well, independently
         * of the lock sleep/wakeup mechanism. When we get a real
         * wakeup the task->state is TASK_RUNNING and we change
         * saved_state accordingly. If we did not get a real wakeup
         * then we return with the saved state. We need to be careful
         * about original state TASK_INTERRUPTIBLE as well, as we
         * could miss a wakeup_interruptible()
         */
        saved_state = rt_set_current_blocked_state(current->state);

        for (;;) {
                unsigned long saved_flags;
                int saved_lock_depth = current->lock_depth;

                /* Try to acquire the lock */
                if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL))
                        break;

                /*
                 * waiter.task is NULL the first time we come here and
                 * when we have been woken up by the previous owner
                 * but the lock got stolen by a higher prio task.
                 */
                if (!waiter.task) {
                        task_blocks_on_rt_mutex(lock, &waiter, current, 0,
                                                flags);
                        /* Wakeup during boost ? */
                        if (unlikely(!waiter.task))
                                continue;
                }

                /*
                 * Prevent schedule() to drop BKL, while waiting for
                 * the lock ! We restore lock_depth when we come back.
                 */
                saved_flags = current->flags & PF_NOSCHED;
                current->lock_depth = -1;
                current->flags &= ~PF_NOSCHED;
                orig_owner = rt_mutex_owner(lock);
                get_task_struct(orig_owner);
                spin_unlock_irqrestore(&lock->wait_lock, flags);

                debug_rt_mutex_print_deadlock(&waiter);

                if (adaptive_wait(&waiter, orig_owner)) {
                        put_task_struct(orig_owner);

                        schedule_rt_mutex(lock);
                } else
                        put_task_struct(orig_owner);

                spin_lock_irqsave(&lock->wait_lock, flags);
                current->flags |= saved_flags;
                current->lock_depth = saved_lock_depth;
                saved_state = rt_set_current_blocked_state(saved_state);
        }

        rt_restore_current_state(saved_state);

        /*
         * Extremely rare case, if we got woken up by a non-mutex wakeup,
         * and we managed to steal the lock despite us not being the
         * highest-prio waiter (due to SCHED_OTHER changing prio), then we
         * can end up with a non-NULL waiter.task:
         */
        if (unlikely(waiter.task))
                remove_waiter(lock, &waiter, flags);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up:
         */
        fixup_rt_mutex_waiters(lock);

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        debug_rt_mutex_free_waiter(&waiter);
}

/*
 * Slow path to release a rt_mutex spin_lock style
 */
static void noinline __sched
rt_spin_lock_slowunlock(struct rt_mutex *lock)
{
        unsigned long flags;

        spin_lock_irqsave(&lock->wait_lock, flags);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                spin_unlock_irqrestore(&lock->wait_lock, flags);
                return;
        }

        wakeup_next_waiter(lock, 1);

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        /* Undo pi boosting when necessary */
        rt_mutex_adjust_prio(current);
}

void __lockfunc rt_spin_lock(spinlock_t *lock)
{
        rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
}
EXPORT_SYMBOL(rt_spin_lock);

void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
{
        rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
}
EXPORT_SYMBOL(__rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
        rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

#endif

void __lockfunc rt_spin_unlock(spinlock_t *lock)
{
        /* NOTE: we always pass in '1' for nested, for simplicity */
        spin_release(&lock->dep_map, 1, _RET_IP_);
        rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
}
EXPORT_SYMBOL(rt_spin_unlock);

void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
{
        rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
}
EXPORT_SYMBOL(__rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), we lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
{
        rt_spin_lock(lock);
        rt_spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_unlock_wait);

int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
        int ret = rt_mutex_trylock(&lock->lock);

        if (ret)
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL(rt_spin_trylock);

int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
{
        int ret;

        *flags = 0;
        ret = rt_mutex_trylock(&lock->lock);
        if (ret)
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_irqsave);

int _atomic_dec_and_spin_lock(spinlock_t *lock, atomic_t *atomic)
{
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;

        rt_spin_lock(lock);
        if (atomic_dec_and_test(atomic))
                return 1;
        rt_spin_unlock(lock);
        return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_spin_lock);
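
/*
 * Typical usage sketch (hypothetical caller, mirroring how
 * atomic_dec_and_lock() is used with raw spinlocks; 'cache_lock', 'obj'
 * and its fields are made up for the example):
 *
 *      if (_atomic_dec_and_spin_lock(&cache_lock, &obj->refcount)) {
 *              list_del(&obj->node);
 *              spin_unlock(&cache_lock);
 *              kfree(obj);
 *      }
 *
 * The lock is only taken when the count is about to hit zero, so the
 * common reference-drop path stays lock-free.
 */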

void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        __rt_mutex_init(&lock->lock, name);
}
EXPORT_SYMBOL(__rt_spin_lock_init);

#endif /* CONFIG_PREEMPT_RT */

static inline int rt_release_bkl(struct rt_mutex *lock, unsigned long flags)
{
        int saved_lock_depth = current->lock_depth;

#ifdef CONFIG_LOCK_KERNEL
        current->lock_depth = -1;
        /*
         * try_to_take_lock set the waiters, make sure it's
         * still correct.
         */
        fixup_rt_mutex_waiters(lock);
        spin_unlock_irqrestore(&lock->wait_lock, flags);

        up(&kernel_sem);

        spin_lock_irq(&lock->wait_lock);
#endif
        return saved_lock_depth;
}

static inline void rt_reacquire_bkl(int saved_lock_depth)
{
#ifdef CONFIG_LOCK_KERNEL
        down(&kernel_sem);
        current->lock_depth = saved_lock_depth;
#endif
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:               the rt_mutex to take
 * @state:              the state the task should block in (TASK_INTERRUPTIBLE
 *                      or TASK_UNINTERRUPTIBLE)
 * @timeout:            the pre-initialized and started timer, or NULL for none
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    passed to task_blocks_on_rt_mutex
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter,
                    int detect_deadlock, unsigned long flags)
{
        int ret = 0;

        for (;;) {
                unsigned long saved_flags;

                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                /*
                 * waiter->task is NULL the first time we come here and
                 * when we have been woken up by the previous owner
                 * but the lock got stolen by a higher prio task.
                 */
                if (!waiter->task) {
                        ret = task_blocks_on_rt_mutex(lock, waiter, current,
                                                      detect_deadlock, flags);
                        /*
                         * If we got woken up by the owner then start loop
                         * all over without going into schedule to try
                         * to get the lock now:
                         */
                        if (unlikely(!waiter->task)) {
                                /*
                                 * Reset the return value. We might
                                 * have returned with -EDEADLK and the
                                 * owner released the lock while we
                                 * were walking the pi chain.
                                 */
                                ret = 0;
                                continue;
                        }
                        if (unlikely(ret))
                                break;
                }

                saved_flags = current->flags & PF_NOSCHED;
                current->flags &= ~PF_NOSCHED;

                spin_unlock_irq(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                if (waiter->task)
                        schedule_rt_mutex(lock);

                spin_lock_irq(&lock->wait_lock);

                current->flags |= saved_flags;
                set_current_state(state);
        }

        return ret;
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        int ret = 0, saved_lock_depth = -1;
        struct rt_mutex_waiter waiter;
        unsigned long flags;

        debug_rt_mutex_init_waiter(&waiter);
        waiter.task = NULL;

        spin_lock_irqsave(&lock->wait_lock, flags);
        init_lists(lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock)) {
                spin_unlock_irqrestore(&lock->wait_lock, flags);
                return 0;
        }

        /*
         * We drop the BKL here before we go into the wait loop to avoid a
         * possible deadlock in the scheduler.
         */
        if (unlikely(current->lock_depth >= 0))
                saved_lock_depth = rt_release_bkl(lock, flags);

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
                                  detect_deadlock, flags);

        set_current_state(TASK_RUNNING);

        if (unlikely(waiter.task))
                remove_waiter(lock, &waiter, flags);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        /*
         * Readjust priority, when we did not get the lock. We might
         * have been the pending owner and boosted. Since we did not
         * take the lock, the PI boost has to go.
         */
        if (unlikely(ret))
                rt_mutex_adjust_prio(current);

        /* Must we reacquire the BKL? */
        if (unlikely(saved_lock_depth >= 0))
                rt_reacquire_bkl(saved_lock_depth);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&lock->wait_lock, flags);

        if (likely(rt_mutex_owner(lock) != current)) {

                init_lists(lock);

                ret = try_to_take_rt_mutex(lock);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        unsigned long flags;

        spin_lock_irqsave(&lock->wait_lock, flags);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                spin_unlock_irqrestore(&lock->wait_lock, flags);
                return;
        }

        wakeup_next_waiter(lock, 0);

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock_killable - lock a rt_mutex killable
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_killable(struct rt_mutex *lock,
                                   int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_KILLABLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *                      the timeout structure is provided
 *                      by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        spin_lock_init(&lock->wait_lock);
        plist_head_init(&lock->wait_list, &lock->wait_lock);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:       the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner, 0);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL, 0);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&lock->wait_lock, flags);

        mark_rt_mutex_waiters(lock);

        if (!rt_mutex_owner(lock) ||
            try_to_steal_lock(lock, task, STEAL_NORMAL)) {
                /* We got the lock for task. */
                debug_rt_mutex_lock(lock);
                rt_mutex_set_owner(lock, task, 0);
                spin_unlock_irqrestore(&lock->wait_lock, flags);
                rt_mutex_deadlock_account_lock(lock, task);
                return 1;
        }

        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock,
                                      flags);

        if (ret && !waiter->task) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain. Let the waiter sort it out.
                 */
                ret = 0;
        }
        spin_unlock_irqrestore(&lock->wait_lock, flags);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:               the rt_mutex we were woken on
 * @to:                 the timeout, null if none. hrtimer should already have
 *                      been started.
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&lock->wait_lock, flags);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
                                  detect_deadlock, flags);

        set_current_state(TASK_RUNNING);

        if (unlikely(waiter->task))
                remove_waiter(lock, waiter, flags);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        spin_unlock_irqrestore(&lock->wait_lock, flags);

        /*
         * Readjust priority, when we did not get the lock. We might have been
         * the pending owner and boosted. Since we did not take the lock, the
         * PI boost has to go.
         */
        if (unlikely(ret))
                rt_mutex_adjust_prio(current);

        return ret;
}
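
/*
 * Usage sketch for the proxy-lock API above (a simplified view of what the
 * futex requeue_pi code in kernel/futex.c does; the surrounding futex
 * details are omitted):
 *
 *      - the task performing the requeue calls rt_mutex_start_proxy_lock()
 *        to either acquire the PI mutex on behalf of the waiter (return 1,
 *        the waiter only needs a wakeup) or to enqueue the waiter on it
 *        (return 0);
 *      - once the requeued waiter wakes up on the rt_mutex it calls
 *        rt_mutex_finish_proxy_lock() itself to complete or abort the
 *        acquisition started on its behalf.
 */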