/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It can also be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL in this small time window, hence this can be a transitional state.
 *
 * (**) There is a small time window when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
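
/*
 * Illustrative sketch (not part of the original file): decoding the two
 * components of the owner word described above. This mirrors the
 * rt_mutex_owner() helper provided by rtmutex_common.h; the example_*
 * names are hypothetical.
 */
static inline struct task_struct *example_owner_decode(struct rt_mutex *lock)
{
        unsigned long val = (unsigned long)lock->owner;

        /* Mask off bit 0 to recover the plain task pointer (or NULL). */
        return (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);
}

static inline int example_waiters_bit(struct rt_mutex *lock)
{
        /* Bit 0 of the owner word is the "lock has waiters" flag. */
        return ((unsigned long)lock->owner & RT_MUTEX_HAS_WAITERS) != 0;
}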
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}
static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}
/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and there is no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
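
/*
 * Illustrative interleaving (a sketch, not from the original file) of why
 * mark_rt_mutex_waiters() must use a cmpxchg loop when the fast path is
 * enabled:
 *
 *	CPU 0 (slow path, under wait_lock)	CPU 1 (owner, fast unlock)
 *	----------------------------------	--------------------------
 *	mark_rt_mutex_waiters(lock);
 *						cmpxchg(owner, current, NULL)
 *						  fails: bit 0 is set, so the
 *						  release falls back to the
 *						  slow path under wait_lock
 *	inspects lock->owner safely
 *
 * A non-atomic read-modify-write of the owner word instead of the cmpxchg
 * loop could race with the owner's cmpxchg release and lose the update.
 */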
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                     struct rt_mutex_waiter *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return (left->task->dl.deadline <
                        right->task->dl.deadline);

        return 0;
}
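
/*
 * Example (sketch): two SCHED_DEADLINE waiters compare equal on ->prio,
 * so the tie is broken by the absolute deadline; a waiter with
 * dl.deadline = 100us sorts before one with dl.deadline = 200us and
 * becomes the leftmost node in the waiter tree.
 */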
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &lock->waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                lock->waiters_leftmost = &waiter->tree_entry;

        rb_link_node(&waiter->tree_entry, parent, link);
        rb_insert_color(&waiter->tree_entry, &lock->waiters);
}
static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;

        if (lock->waiters_leftmost == &waiter->tree_entry)
                lock->waiters_leftmost = rb_next(&waiter->tree_entry);

        rb_erase(&waiter->tree_entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree_entry);
}
static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &task->pi_waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                task->pi_waiters_leftmost = &waiter->pi_tree_entry;

        rb_link_node(&waiter->pi_tree_entry, parent, link);
        rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}
static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
                return;

        if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
                task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

        rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
}
/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->prio,
                   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return NULL;

        return task_top_pi_waiter(task)->task;
}
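
/*
 * Worked example (sketch, using rt priority values where a lower number
 * means more important): a task with normal_prio 50 whose top pi waiter
 * has prio 10 gets min(10, 50) = 10 from rt_mutex_getprio(); once the
 * last waiter is gone, it falls back to normal_prio 50.
 */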
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio || dl_prio(prio))
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold a
         * maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * Task can not go away as we did a get_task_struct() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that
         * top_waiter can be NULL when we are in the deboosting
         * mode!
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */
                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                rt_mutex_dequeue_pi(task, top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                struct rt_mutex_waiter *waiter)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled, such that, when
         *  - no other waiter is on the lock
         *  - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
         * any more. This is fixed up when we take the ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock))
                return 0;

        /*
         * It will get the lock because of one of these conditions:
         * 1) there is no waiter
         * 2) higher priority than waiters
         * 3) it is top waiter
         */
        if (rt_mutex_has_waiters(lock)) {
                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                return 0;
                }
        }

        if (waiter || rt_mutex_has_waiters(lock)) {
                unsigned long flags;
                struct rt_mutex_waiter *top;

                raw_spin_lock_irqsave(&task->pi_lock, flags);

                /* remove the queued waiter. */
                if (waiter) {
                        rt_mutex_dequeue(lock, waiter);
                        task->pi_blocked_on = NULL;
                }

                /*
                 * We have to enqueue the top waiter (if it exists) into
                 * task->pi_waiters list.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        top = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(task, top);
                }
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}
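
/*
 * Example (sketch) for the conditions in try_to_take_rt_mutex() above:
 * if the queued top waiter has prio 10, a task with prio 5 may take the
 * lock ahead of it (condition 2), while a task with prio 20 only gets
 * it when it is the queued top waiter itself (condition 3).
 */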
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int chain_walk = 0, res;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        waiter->prio = task->prio;

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        if (waiter == rt_mutex_top_waiter(lock)) {
                raw_spin_lock_irqsave(&owner->pi_lock, flags);
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        rt_mutex_dequeue_pi(current, waiter);

        rt_mutex_set_owner(lock, NULL);

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        wake_up_process(waiter->task);
}
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int chain_walk = 0;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {
                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                rt_mutex_dequeue_pi(owner, waiter);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(owner, next);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        if (!chain_walk)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || (waiter->prio == task->prio &&
                        !dl_prio(task->prio))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);
        RB_CLEAR_NODE(&waiter.tree_entry);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        raw_spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock, current, NULL);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                raw_spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}
/*
 * Debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			the timeout structure is provided
 *			by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
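
/*
 * Usage sketch (illustrative, not part of the original file): a
 * statically initialized rt_mutex guarding a critical section. The
 * example_lock/example_critical_section names are hypothetical.
 */
static DEFINE_RT_MUTEX(example_lock);

static void example_critical_section(void)
{
        rt_mutex_lock(&example_lock);
        /* While we hold the lock, higher priority waiters boost us. */
        rt_mutex_unlock(&example_lock);
}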
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        lock->waiters = RB_ROOT;
        lock->waiters_leftmost = NULL;

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain. Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}
/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}
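
/*
 * Illustrative flow (a sketch of how the futex requeue code uses the
 * proxy API above): one task starts the acquisition on behalf of
 * another, which later completes or abandons it.
 *
 *	requeueing task:  rt_mutex_start_proxy_lock(lock, waiter, task, 0);
 *	woken task:       rt_mutex_finish_proxy_lock(lock, to, waiter, 0);
 *
 * Between the two calls @task is queued on @lock's wait list and may
 * already have been granted the lock (start returned 1).
 */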