// SPDX-License-Identifier: GPL-2.0-only
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
 *
 * Adaptive Spinlocks simplification:
 * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
 *
 * See Documentation/locking/rt-mutex-design.rst for details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/deadline.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/ww_mutex.h>

#include <trace/events/lock.h>

#include "rtmutex_common.h"
#ifndef WW_RT
# define build_ww_mutex()	(false)
# define ww_container_of(rtm)	NULL

static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
                                        struct rt_mutex *lock,
                                        struct ww_acquire_ctx *ww_ctx,
                                        struct wake_q_head *wake_q)
{
        return 0;
}

static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
                                            struct ww_acquire_ctx *ww_ctx,
                                            struct wake_q_head *wake_q)
{
}

static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
                                          struct ww_acquire_ctx *ww_ctx)
{
}

static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
                                        struct rt_mutex_waiter *waiter,
                                        struct ww_acquire_ctx *ww_ctx)
{
        return 0;
}

#else
# define build_ww_mutex()	(true)
# define ww_container_of(rtm)	container_of(rtm, struct ww_mutex, base)
# include "ww_mutex.h"
#endif
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set the bit0 before looking at the lock, and the owner may be
 * NULL in this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
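/*
 * Illustrative sketch, not part of the upstream file: decoding the owner
 * word described above. Bit 0 is masked off to recover the task pointer,
 * mirroring what rt_mutex_owner() in rtmutex_common.h does. The helper
 * name is made up for this example only.
 */
static __always_inline struct task_struct *
rt_mutex_owner_decode_sketch(struct rt_mutex_base *lock)
{
        unsigned long val = (unsigned long)READ_ONCE(lock->owner);

        /* Bit 0 only carries the "has waiters" state, not pointer bits. */
        return (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);
}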
static __always_inline struct task_struct *
rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        return (struct task_struct *)val;
}
static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
{
        /*
         * lock->wait_lock is held but explicit acquire semantics are needed
         * for a new lock owner so WRITE_ONCE is insufficient.
         */
        xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
}
static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
{
        /* lock->wait_lock is held so the unlock provides release semantics. */
        WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
}
static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
static __always_inline void
fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        if (rt_mutex_has_waiters(lock))
                return;

        /*
         * The rbtree has no waiters enqueued, now make sure that the
         * lock->owner still has the waiters bit set, otherwise the
         * following can happen:
         *
         *	l->owner = T1 | HAS_WAITERS;
         *	...
         *	l->owner = T1 | HAS_WAITERS;
         *	...
         *	signal(->T2)		signal(->T3)
         *	...
         *	 ==> wait list is empty
         *	...
         *	fixup_rt_mutex_waiters()
         *	  if (wait_list_empty(l) {
         *	    owner = l->owner & ~HAS_WAITERS;
         *	...
         * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
         *				  if (wait_list_empty(l) {
         *				    owner = l->owner & ~HAS_WAITERS;
         * cmpxchg(l->owner, T1, NULL)
         *  ===> Success (l->owner = NULL)
         *
         * With the check for the waiter bit in place T3 on CPU2 will not
         * overwrite. All tasks fiddling with the waiters bit are
         * serialized by l->lock, so nothing else can modify the waiters
         * bit. If the bit is set then nothing can change l->owner either
         * so the simple RMW is safe. The cmpxchg() will simply fail if it
         * happens in the middle of the RMW because the waiters bit is
         * still set.
         */
        owner = READ_ONCE(*p);
        if (owner & RT_MUTEX_HAS_WAITERS) {
                /*
                 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
                 * why xchg_acquire() is used for updating owner for
                 * locking and WRITE_ONCE() for unlocking.
                 *
                 * WRITE_ONCE() would work for the acquire case too, but
                 * in case that the lock acquisition failed it might
                 * force other lockers into the slow path unnecessarily.
                 */
                if (acquire_lock)
                        xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
                else
                        WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
        }
}
/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
                                                     struct task_struct *old,
                                                     struct task_struct *new)
{
        return try_cmpxchg_acquire(&lock->owner, &old, new);
}

static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{
        return rt_mutex_cmpxchg_acquire(lock, NULL, current);
}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
                                                     struct task_struct *old,
                                                     struct task_struct *new)
{
        return try_cmpxchg_release(&lock->owner, &old, new);
}

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
 * relaxed semantics suffice.
 */
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
        unsigned long *p = (unsigned long *) &lock->owner;
        unsigned long owner, new;

        owner = READ_ONCE(*p);
        do {
                new = owner | RT_MUTEX_HAS_WAITERS;
        } while (!try_cmpxchg_relaxed(p, &owner, new));

        /*
         * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
         * operations in the event of contention. Ensure the successful
         * cmpxchg is visible.
         */
        smp_mb__after_atomic();
}
/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
                                                 unsigned long flags)
        __releases(lock->wait_lock)
{
        struct task_struct *owner = rt_mutex_owner(lock);

        clear_rt_mutex_waiters(lock);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
         *
         * cmpxchg(p, owner, 0) == owner
         *	mark_rt_mutex_waiters(lock);
         *
         *	mark_rt_mutex_waiters(lock);
         * cmpxchg(p, owner, 0) != owner
         */
        return rt_mutex_cmpxchg_release(lock, owner, NULL);
}
#else
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
                                                     struct task_struct *old,
                                                     struct task_struct *new)
{
        return false;
}

static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);

static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{
        /*
         * With debug enabled rt_mutex_cmpxchg trylock() will always fail.
         *
         * Avoid unconditionally taking the slow path by using
         * rt_mutex_slowtrylock() which is covered by the debug code and can
         * acquire a non-contended rtmutex.
         */
        return rt_mutex_slowtrylock(lock);
}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
                                                     struct task_struct *old,
                                                     struct task_struct *new)
{
        return false;
}

static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
                                                 unsigned long flags)
        __releases(lock->wait_lock)
{
        lock->owner = NULL;
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        return true;
}
#endif
static __always_inline int __waiter_prio(struct task_struct *task)
{
        int prio = task->prio;

        if (!rt_or_dl_prio(prio))
                return DEFAULT_PRIO;

        return prio;
}
/*
 * Update the waiter->tree copy of the sort keys.
 */
static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
        lockdep_assert_held(&waiter->lock->wait_lock);
        lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));

        waiter->tree.prio = __waiter_prio(task);
        waiter->tree.deadline = task->dl.deadline;
}
/*
 * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
 */
static __always_inline void
waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
        lockdep_assert_held(&waiter->lock->wait_lock);
        lockdep_assert_held(&task->pi_lock);
        lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry));

        waiter->pi_tree.prio = waiter->tree.prio;
        waiter->pi_tree.deadline = waiter->tree.deadline;
}
/*
 * Only use with rt_waiter_node_{less,equal}()
 */
#define task_to_waiter_node(p)	\
	&(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
#define task_to_waiter(p)	\
	&(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
                                               struct rt_waiter_node *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return dl_time_before(left->deadline, right->deadline);

        return 0;
}
static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
                                                struct rt_waiter_node *right)
{
        if (left->prio != right->prio)
                return 0;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 0 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return left->deadline == right->deadline;

        return 1;
}
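/*
 * Illustrative sketch, not part of the upstream file: how the transient sort
 * key built by task_to_waiter_node() can be compared against an enqueued
 * waiter, which is essentially what rt_mutex_steal() below does. The helper
 * name is made up for this example only.
 */
static __always_inline bool task_outranks_waiter_sketch(struct task_struct *p,
                                                        struct rt_mutex_waiter *waiter)
{
        /* Lower prio value (or earlier deadline for DL tasks) wins. */
        return rt_waiter_node_less(task_to_waiter_node(p), &waiter->tree);
}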
static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
                                  struct rt_mutex_waiter *top_waiter)
{
        if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
                return true;

#ifdef RT_MUTEX_BUILD_SPINLOCKS
        /*
         * Note that RT tasks are excluded from same priority (lateral)
         * steals to prevent the introduction of an unbounded latency.
         */
        if (rt_or_dl_prio(waiter->tree.prio))
                return false;

        return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
#else
        return false;
#endif
}
#define __node_2_waiter(node) \
	rb_entry((node), struct rt_mutex_waiter, tree.entry)

static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
        struct rt_mutex_waiter *aw = __node_2_waiter(a);
        struct rt_mutex_waiter *bw = __node_2_waiter(b);

        if (rt_waiter_node_less(&aw->tree, &bw->tree))
                return 1;

        if (!build_ww_mutex())
                return 0;

        if (rt_waiter_node_less(&bw->tree, &aw->tree))
                return 0;

        /* NOTE: relies on waiter->ww_ctx being set before insertion */
        if (aw->ww_ctx) {
                if (!bw->ww_ctx)
                        return 1;

                return (signed long)(aw->ww_ctx->stamp -
                                     bw->ww_ctx->stamp) < 0;
        }

        return 0;
}
static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
        lockdep_assert_held(&lock->wait_lock);

        rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
}
static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
        lockdep_assert_held(&lock->wait_lock);

        if (RB_EMPTY_NODE(&waiter->tree.entry))
                return;

        rb_erase_cached(&waiter->tree.entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree.entry);
}
#define __node_2_rt_node(node) \
	rb_entry((node), struct rt_waiter_node, entry)

static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
        return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b));
}
static __always_inline void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        lockdep_assert_held(&task->pi_lock);

        rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
}
static __always_inline void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        lockdep_assert_held(&task->pi_lock);

        if (RB_EMPTY_NODE(&waiter->pi_tree.entry))
                return;

        rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree.entry);
}
static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock,
                                                 struct task_struct *p)
{
        struct task_struct *pi_task = NULL;

        lockdep_assert_held(&lock->wait_lock);
        lockdep_assert(rt_mutex_owner(lock) == p);
        lockdep_assert_held(&p->pi_lock);

        if (task_has_pi_waiters(p))
                pi_task = task_top_pi_waiter(p)->task;

        rt_mutex_setprio(p, pi_task);
}
/* RT mutex specific wake_q wrappers */
static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
                                                     struct task_struct *task,
                                                     unsigned int wake_state)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) {
                if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                        WARN_ON_ONCE(wqh->rtlock_task);
                get_task_struct(task);
                wqh->rtlock_task = task;
        } else {
                wake_q_add(&wqh->head, task);
        }
}
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
                                                struct rt_mutex_waiter *w)
{
        rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
}
static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
                wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
                put_task_struct(wqh->rtlock_task);
                wqh->rtlock_task = NULL;
        }

        if (!wake_q_empty(&wqh->head))
                wake_up_q(&wqh->head);

        /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
        preempt_enable();
}
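/*
 * Illustrative sketch, not part of the upstream file: the collect-then-wake
 * pattern the wrappers above are built for, mirroring what
 * mark_wakeup_next_waiter() and rt_mutex_slowunlock() do further down. The
 * helper name is made up for this example only.
 */
static __always_inline void rt_wake_q_pattern_sketch(struct rt_mutex_waiter *waiter)
{
        DEFINE_RT_WAKE_Q(wqh);

        /* Queue the wakeup while still serialized (normally under wait_lock). */
        preempt_disable();
        rt_mutex_wake_q_add(&wqh, waiter);

        /* Issue the wakeup after the lock is dropped; re-enables preemption. */
        rt_mutex_wake_up_q(&wqh);
}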
/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static __always_inline bool
rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                              enum rtmutex_chainwalk chwalk)
{
        if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
                return waiter != NULL;
        return chwalk == RT_MUTEX_FULL_CHAINWALK;
}
static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}
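/*
 * Illustrative sketch, not part of the upstream file: following one link of
 * the blocking chain, from a blocked task to the lock it waits on and onward
 * to that lock's owner. This is the single step which the chain walk below
 * repeats. The helper name is made up for this example only.
 */
static __always_inline struct task_struct *next_owner_in_chain_sketch(struct task_struct *p)
{
        struct rt_mutex_base *lock = task_blocked_on_lock(p);

        return lock ? rt_mutex_owner(lock) : NULL;
}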
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	 the task owning the mutex (owner) for which a chain walk is
 *		 probably needed
 * @chwalk:	 do we have to carry out deadlock detection?
 * @orig_lock:	 the mutex (can be NULL if we are walking the chain to recheck
 *		 things for a task that has just got its priority adjusted, and
 *		 is waiting on a mutex)
 * @next_lock:	 the mutex on which the owner of @orig_lock was blocked before
 *		 we dropped its pi_lock. Is never dereferenced, only used for
 *		 comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		 its priority to the mutex owner (can be NULL in the case
 *		 depicted above or if the top waiter has gone away and we are
 *		 actually deboosting the owner)
 * @top_task:	 the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [Pn] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Normal locking order:
 *
 *   rtmutex->wait_lock
 *     task->pi_lock
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 *
 *	  loop_sanity_check();
 * [1]	  lock(task->pi_lock);			[R] acquire [P1]
 * [2]	  waiter = task->pi_blocked_on;		[P1]
 * [3]	  check_exit_conditions_1();		[P1]
 * [4]	  lock = waiter->lock;			[P1]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P1] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P1]
 * [6]	  check_exit_conditions_2();		[P1] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P1] + [L]
 * [8]	  unlock(task->pi_lock);		release [P1]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P2]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P2] + [L]
 * [12]	  check_exit_conditions_4();		[P2] + [L]
 * [13]	  unlock(task->pi_lock);		release [P2]
 *	  unlock(lock->wait_lock);		release [L]
 *
 * Where P1 is the blocking task and P2 is the lock owner; going up one step
 * the owner becomes the next blocked task etc..
 */
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
                                              enum rtmutex_chainwalk chwalk,
                                              struct rt_mutex_base *orig_lock,
                                              struct rt_mutex_base *next_lock,
                                              struct rt_mutex_waiter *orig_waiter,
                                              struct task_struct *top_task)
{
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        struct rt_mutex_waiter *prerequeue_top_waiter;
        int ret = 0, depth = 0;
        struct rt_mutex_base *lock;
        bool detect_deadlock;

        detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold a
         * maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        /*
         * We limit the lock chain length for each invocation.
         */
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }
        /*
         * We are fully preemptible here and only hold the refcount on
         * @task. So everything can have changed under us since the
         * caller or our own code below (goto retry/again) dropped all
         * locks.
         */
 retry:
        /*
         * [1] Task cannot go away as we did a get_task() before !
         */
        raw_spin_lock_irq(&task->pi_lock);

        /*
         * [2] Get the waiter on which @task is blocked on.
         */
        waiter = task->pi_blocked_on;

        /*
         * [3] check_exit_conditions_1() protected by task->pi_lock.
         *
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock)
                goto out_unlock_pi;

        /*
         * There could be 'spurious' loops in the lock graph due to ww_mutex,
         *
         * P3 should not return -EDEADLK because it gets trapped in the cycle
         * created by P1 and P2 (which will resolve -- and runs into
         * max_lock_depth above). Therefore disable detect_deadlock such that
         * the below termination condition can trigger once all relevant tasks
         * are boosted.
         *
         * Even when we start with ww_mutex we can disable deadlock detection,
         * since we would suppress a ww_mutex induced deadlock at [6] anyway.
         * Suppressing it here however is not sufficient since we might still
         * hit [6] due to adjustment driven iteration.
         *
         * NOTE: if someone were to create a deadlock between 2 ww_classes we'd
         * utterly fail to report it; lockdep should.
         */
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
                detect_deadlock = false;

        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
         * mode!
         */
        if (!task_has_pi_waiters(task))
                goto out_unlock_pi;

        /*
         * If deadlock detection is off, we stop here if we
         * are not the top pi waiter of the task. If deadlock
         * detection is enabled we continue, but stop the
         * requeueing in the chain walk.
         */
        if (top_waiter != task_top_pi_waiter(task)) {
                if (!detect_deadlock)
                        goto out_unlock_pi;
        }

        /*
         * If the waiter priority is the same as the task priority
         * then there is no further priority adjustment necessary. If
         * deadlock detection is off, we stop the chain walk. If it's
         * enabled we continue, but stop the requeueing in the chain
         * walk.
         */
        if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
                if (!detect_deadlock)
                        goto out_unlock_pi;
        }

        /*
         * [4] Get the next lock; per holding task->pi_lock we can't unblock
         * and guarantee @lock's existence.
         */
        lock = waiter->lock;
        /*
         * [5] We need to trylock here as we are holding task->pi_lock,
         * which is the reverse lock order versus the other rtmutex
         * operations.
         *
         * Per the above, holding task->pi_lock guarantees lock exists, so
         * inverting this lock order is infeasible from a life-time
         * perspective.
         */
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irq(&task->pi_lock);
                cpu_relax();
                goto retry;
        }

        /*
         * [6] check_exit_conditions_2() protected by task->pi_lock and
         * lock->wait_lock.
         *
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                ret = -EDEADLK;

                /*
                 * When the deadlock is due to ww_mutex; also see above. Don't
                 * report the deadlock and instead let the ww_mutex wound/die
                 * logic pick which of the contending threads gets -EDEADLK.
                 *
                 * NOTE: assumes the cycle only contains a single ww_class; any
                 * other configuration and we fail to report; also, see
                 * lockdep.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx)
                        ret = 0;

                raw_spin_unlock(&lock->wait_lock);
                goto out_unlock_pi;
        }

        /*
         * If we just follow the lock chain for deadlock detection, no
         * need to do all the requeue operations. To avoid a truckload
         * of conditionals around the various places below, just do the
         * minimum chain walk checks.
         */

        /*
         * No requeue[7] here. Just release @task [8]
         */
        raw_spin_unlock(&task->pi_lock);
        put_task_struct(task);

        /*
         * [9] check_exit_conditions_3 protected by lock->wait_lock.
         * If there is no owner of the lock, end of chain.
         */
        if (!rt_mutex_owner(lock)) {
                raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }

        /* [10] Grab the next task, i.e. owner of @lock */
        task = get_task_struct(rt_mutex_owner(lock));
        raw_spin_lock(&task->pi_lock);

        /*
         * No requeue [11] here. We just do deadlock detection.
         *
         * [12] Store whether owner is blocked
         * itself. Decision is made after dropping the locks
         */
        next_lock = task_blocked_on_lock(task);
        /*
         * Get the top waiter for the next iteration
         */
        top_waiter = rt_mutex_top_waiter(lock);

        /* [13] Drop locks */
        raw_spin_unlock(&task->pi_lock);
        raw_spin_unlock_irq(&lock->wait_lock);

        /* If owner is not blocked, end of chain. */
        /*
         * Store the current top waiter before doing the requeue
         * operation on @lock. We need it for the boost/deboost
         * decision below.
         */
        prerequeue_top_waiter = rt_mutex_top_waiter(lock);

        /* [7] Requeue the waiter in the lock waiter tree. */
        rt_mutex_dequeue(lock, waiter);

        /*
         * Update the waiter prio fields now that we're dequeued.
         *
         * These values can have changed through either:
         *
         *   sys_sched_set_scheduler() / sys_sched_setattr()
         *
         * or
         *
         *   DL CBS enforcement advancing the effective deadline.
         */
        waiter_update_prio(waiter, task);

        rt_mutex_enqueue(lock, waiter);

        /*
         * [8] Release the (blocking) task in preparation for
         * taking the owner task in [10].
         *
         * Since we hold lock->wait_lock, task cannot unblock, even if we
         * release task->pi_lock.
         */
        raw_spin_unlock(&task->pi_lock);
        put_task_struct(task);

        /*
         * [9] check_exit_conditions_3 protected by lock->wait_lock.
         *
         * We must abort the chain walk if there is no lock owner even
         * in the dead lock detection case, as we have nothing to
         * follow here. This is the end of the chain we are walking.
         */
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue [7] above changed the top waiter,
                 * then we need to wake the new top waiter up to try
                 * to get the lock.
                 */
                top_waiter = rt_mutex_top_waiter(lock);
                if (prerequeue_top_waiter != top_waiter)
                        wake_up_state(top_waiter->task, top_waiter->wake_state);
                raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }

        /*
         * [10] Grab the next task, i.e. the owner of @lock
         *
         * Per holding lock->wait_lock and checking for !owner above, there
         * must be an owner and it cannot go away.
         */
        task = get_task_struct(rt_mutex_owner(lock));
        raw_spin_lock(&task->pi_lock);

        /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
                /*
                 * The waiter became the new top (highest priority)
                 * waiter on the lock. Replace the previous top waiter
                 * in the owner tasks pi waiters tree with this waiter
                 * and adjust the priority of the owner.
                 */
                rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
                waiter_clone_prio(waiter, task);
                rt_mutex_enqueue_pi(task, waiter);
                rt_mutex_adjust_prio(lock, task);

        } else if (prerequeue_top_waiter == waiter) {
                /*
                 * The waiter was the top waiter on the lock, but is
                 * no longer the top priority waiter. Replace waiter in
                 * the owner tasks pi waiters tree with the new top
                 * (highest priority) waiter and adjust the priority
                 * of the owner.
                 *
                 * The new top waiter is stored in @waiter so that
                 * @waiter == @top_waiter evaluates to true below and
                 * we continue to deboost the rest of the chain.
                 */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                waiter_clone_prio(waiter, task);
                rt_mutex_enqueue_pi(task, waiter);
                rt_mutex_adjust_prio(lock, task);
        } else {
                /*
                 * Nothing changed. No need to do any priority
                 * adjustment.
                 */
        }

        /*
         * [12] check_exit_conditions_4() protected by task->pi_lock
         * and lock->wait_lock. The actual decisions are made after we
         * dropped the locks.
         *
         * Check whether the task which owns the current lock is pi
         * blocked itself. If yes we store a pointer to the lock for
         * the lock chain change detection above. After we dropped
         * task->pi_lock next_lock cannot be dereferenced anymore.
         */
        next_lock = task_blocked_on_lock(task);
        /*
         * Store the top waiter of @lock for the end of chain walk
         * decision.
         */
        top_waiter = rt_mutex_top_waiter(lock);

        /* [13] Drop the locks */
        raw_spin_unlock(&task->pi_lock);
        raw_spin_unlock_irq(&lock->wait_lock);

        /*
         * Make the actual exit decisions [12], based on the stored
         * values.
         *
         * We reached the end of the lock chain. Stop right here. No
         * point to go back just to figure that out.
         */

        /*
         * If the current waiter is not the top waiter on the lock,
         * then we can stop the chain walk here if we are not in full
         * deadlock detection mode.
         */
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
        put_task_struct(task);

        return ret;
}
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
                     struct rt_mutex_waiter *waiter)
{
        lockdep_assert_held(&lock->wait_lock);

        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
         * other tasks which try to modify @lock into the slow path
         * and they serialize on @lock->wait_lock.
         *
         * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
         * as explained at the top of this file if and only if:
         *
         * - There is a lock owner. The caller must fixup the
         *   transient state if it does a trylock or leaves the lock
         *   function due to a signal or timeout.
         *
         * - @task acquires the lock and there are no other
         *   waiters. This is undone in rt_mutex_set_owner(@task) at
         *   the end of this function.
         */
        mark_rt_mutex_waiters(lock);

        /*
         * If @lock has an owner, give up.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * If @waiter != NULL, @task has already enqueued the waiter
         * into @lock waiter tree. If @waiter == NULL then this is a
         * trylock attempt.
         */
        if (waiter) {
                struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);

                /*
                 * If waiter is the highest priority waiter of @lock,
                 * or allowed to steal it, take it over.
                 */
                if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
                        /*
                         * We can acquire the lock. Remove the waiter from the
                         * lock waiters tree.
                         */
                        rt_mutex_dequeue(lock, waiter);
                } else {
                        return 0;
                }
        } else {
                /*
                 * If the lock has waiters already we check whether @task is
                 * eligible to take over the lock.
                 *
                 * If there are no other waiters, @task can acquire
                 * the lock. @task->pi_blocked_on is NULL, so it does
                 * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        /* Check whether the trylock can steal it. */
                        if (!rt_mutex_steal(task_to_waiter(task),
                                            rt_mutex_top_waiter(lock)))
                                return 0;

                        /*
                         * The current top waiter stays enqueued. We
                         * don't have to change anything in the lock
                         * waiters order.
                         */
                } else {
                        /*
                         * No waiters. Take the lock without the
                         * pi_lock dance. @task->pi_blocked_on is NULL
                         * and we have no waiters to enqueue in @task
                         * pi waiters tree.
                         */
                        goto takeit;
                }
        }

        /*
         * Clear @task->pi_blocked_on. Requires protection by
         * @task->pi_lock. Redundant operation for the @waiter == NULL
         * case, but conditionals are more expensive than a redundant
         * store.
         */
        raw_spin_lock(&task->pi_lock);
        task->pi_blocked_on = NULL;
        /*
         * Finish the lock acquisition. @task is the new owner. If
         * other waiters exist we have to insert the highest priority
         * waiter into @task->pi_waiters tree.
         */
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
        raw_spin_unlock(&task->pi_lock);

takeit:
        /*
         * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
         * are still waiters or clears it.
         */
        rt_mutex_set_owner(lock, task);

        return 1;
}
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
                                           struct rt_mutex_waiter *waiter,
                                           struct task_struct *task,
                                           struct ww_acquire_ctx *ww_ctx,
                                           enum rtmutex_chainwalk chwalk,
                                           struct wake_q_head *wake_q)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex_base *next_lock;
        int chain_walk = 0, res;

        lockdep_assert_held(&lock->wait_lock);

        /*
         * Early deadlock detection. We really don't want the task to
         * enqueue on itself just to untangle the mess later. It's not
         * only an optimization. We drop the locks, so another waiter
         * can come in before the chain walk detects the deadlock. So
         * the other will detect the deadlock and return -EDEADLOCK,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         *
         * Except for ww_mutex, in that case the chain walk must already deal
         * with spurious cycles, see the comments at [3] and [6].
         */
        if (owner == task && !(build_ww_mutex() && ww_ctx))
                return -EDEADLK;

        raw_spin_lock(&task->pi_lock);
        waiter->task = task;
        waiter->lock = lock;
        waiter_update_prio(waiter, task);
        waiter_clone_prio(waiter, task);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter);

        task->pi_blocked_on = waiter;

        raw_spin_unlock(&task->pi_lock);

        if (build_ww_mutex() && ww_ctx) {
                struct rt_mutex *rtm;

                /* Check whether the waiter should back out immediately */
                rtm = container_of(lock, struct rt_mutex, rtmutex);
                res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
                if (res) {
                        raw_spin_lock(&task->pi_lock);
                        rt_mutex_dequeue(lock, waiter);
                        task->pi_blocked_on = NULL;
                        raw_spin_unlock(&task->pi_lock);
                        return res;
                }
        }

        if (!owner)
                return 0;

        raw_spin_lock(&owner->pi_lock);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                rt_mutex_adjust_prio(lock, owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
        } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
                chain_walk = 1;
        }

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock(&owner->pi_lock);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
         * walk.
         */
        if (!chain_walk || !next_lock)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock_irq(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                         next_lock, waiter, task);

        raw_spin_lock_irq(&lock->wait_lock);

        return res;
}
/*
 * Remove the top waiter from the current tasks pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
                                            struct rt_mutex_base *lock)
{
        struct rt_mutex_waiter *waiter;

        lockdep_assert_held(&lock->wait_lock);

        raw_spin_lock(&current->pi_lock);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters and deboost.
         *
         * We must in fact deboost here in order to ensure we call
         * rt_mutex_setprio() to update p->pi_top_task before the
         * task unblocks.
         */
        rt_mutex_dequeue_pi(current, waiter);
        rt_mutex_adjust_prio(lock, current);

        /*
         * As we are waking up the top waiter, and the waiter stays
         * queued on the lock until it gets the lock, this lock
         * obviously has waiters. Just set the bit here and this has
         * the added benefit of forcing all new tasks into the
         * slow path making sure no task of lower priority than
         * the top waiter can steal this lock.
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

        /*
         * We deboosted before waking the top waiter task such that we don't
         * run two tasks with the 'same' priority (and ensure the
         * p->pi_top_task pointer points to a blocked task). This however can
         * lead to priority inversion if we would get preempted after the
         * deboost but before waking our donor task, hence the preempt_disable()
         * below.
         *
         * Pairs with preempt_enable() in rt_mutex_wake_up_q();
         */
        preempt_disable();
        rt_mutex_wake_q_add(wqh, waiter);
        raw_spin_unlock(&current->pi_lock);
}
static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
        int ret = try_to_take_rt_mutex(lock, current, NULL);

        /*
         * try_to_take_rt_mutex() sets the lock waiters bit
         * unconditionally. Clean this up.
         */
        fixup_rt_mutex_waiters(lock, true);

        return ret;
}
/*
 * Slow path try-lock function:
 */
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
        unsigned long flags;
        int ret;

        /*
         * If the lock already has an owner we fail to get the lock.
         * This can be done without taking the @lock->wait_lock as
         * it is only being read, and this is a trylock anyway.
         */
        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The mutex has currently no owner. Lock the wait lock and try to
         * acquire the lock. We use irqsave here to support early boot calls.
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);

        ret = __rt_mutex_slowtrylock(lock);

        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        return ret;
}
static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                return 1;

        return rt_mutex_slowtrylock(lock);
}
/*
 * Slow path to release a rt-mutex.
 */
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{
        DEFINE_RT_WAKE_Q(wqh);
        unsigned long flags;

        /* irqsave required to support early boot calls */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);

        debug_rt_mutex_unlock(lock);

        /*
         * We must be careful here if the fast path is enabled. If we
         * have no waiters queued we cannot set owner to NULL here
         * because of:
         *
         *	foo->lock->owner = NULL;
         *	rtmutex_lock(foo->lock);   <- fast path
         *	free = atomic_dec_and_test(foo->refcnt);
         *	rtmutex_unlock(foo->lock); <- fast path
         *	raw_spin_unlock(foo->lock->wait_lock);
         *
         * So for the fastpath enabled kernel:
         *
         * Nothing can set the waiters bit as long as we hold
         * lock->wait_lock. So we do the following sequence:
         *
         *	owner = rt_mutex_owner(lock);
         *	clear_rt_mutex_waiters(lock);
         *	raw_spin_unlock(&lock->wait_lock);
         *	if (cmpxchg(&lock->owner, owner, 0) == owner)
         *		return;
         *
         * The fastpath disabled variant is simple as all access to
         * lock->owner is serialized by lock->wait_lock:
         *
         *	lock->owner = NULL;
         *	raw_spin_unlock(&lock->wait_lock);
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
                if (unlock_rt_mutex_safe(lock, flags) == true)
                        return;
                /* Relock the rtmutex and try again */
                raw_spin_lock_irqsave(&lock->wait_lock, flags);
        }

        /*
         * The wakeup next waiter path does not suffer from the above
         * race. See the comments there.
         *
         * Queue the next waiter for wakeup once we release the wait_lock.
         */
        mark_wakeup_next_waiter(&wqh, lock);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

        rt_mutex_wake_up_q(&wqh);
}
static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{
        if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
                return;

        rt_mutex_slowunlock(lock);
}
#ifdef CONFIG_SMP
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
                                  struct rt_mutex_waiter *waiter,
                                  struct task_struct *owner)
{
        bool res = true;

        rcu_read_lock();
        for (;;) {
                /* If owner changed, trylock again. */
                if (owner != rt_mutex_owner(lock))
                        break;
                /*
                 * Ensure that @owner is dereferenced after checking that
                 * the lock owner still matches @owner. If that fails,
                 * @owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();
                /*
                 * Stop spinning when:
                 *  - the lock owner has been scheduled out
                 *  - current is no longer the top waiter
                 *  - current is requested to reschedule (redundant
                 *    for CONFIG_PREEMPT_RCU=y)
                 *  - the VCPU on which owner runs is preempted
                 */
                if (!owner_on_cpu(owner) || need_resched() ||
                    !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
                        res = false;
                        break;
                }
                cpu_relax();
        }
        rcu_read_unlock();
        return res;
}
#else
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
                                  struct rt_mutex_waiter *waiter,
                                  struct task_struct *owner)
{
        return false;
}
#endif
#ifdef RT_MUTEX_BUILD_MUTEX
/*
 * Functions required for:
 *	- rtmutex, futex on all kernels
 *	- mutex and rwsem substitutions on RT kernels
 */
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. It must
 * have just failed to try_to_take_rt_mutex().
 */
static void __sched remove_waiter(struct rt_mutex_base *lock,
                                  struct rt_mutex_waiter *waiter)
{
        bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_base *next_lock;

        lockdep_assert_held(&lock->wait_lock);

        raw_spin_lock(&current->pi_lock);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock(&current->pi_lock);

        /*
         * Only update priority if the waiter was the highest priority
         * waiter of the lock and there is an owner to update.
         */
        if (!owner || !is_top_waiter)
                return;

        raw_spin_lock(&owner->pi_lock);

        rt_mutex_dequeue_pi(owner, waiter);

        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

        rt_mutex_adjust_prio(lock, owner);

        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);

        raw_spin_unlock(&owner->pi_lock);

        /*
         * Don't walk the chain, if the owner task is not blocked
         * itself.
         */
        if (!next_lock)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock_irq(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
                                   next_lock, NULL, current);

        raw_spin_lock_irq(&lock->wait_lock);
}
/**
 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @ww_ctx:	 WW mutex context pointer
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
                                           struct ww_acquire_ctx *ww_ctx,
                                           unsigned int state,
                                           struct hrtimer_sleeper *timeout,
                                           struct rt_mutex_waiter *waiter)
        __releases(&lock->wait_lock) __acquires(&lock->wait_lock)
{
        struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
        struct task_struct *owner;
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                if (timeout && !timeout->task) {
                        ret = -ETIMEDOUT;
                        break;
                }
                if (signal_pending_state(state, current)) {
                        ret = -EINTR;
                        break;
                }

                if (build_ww_mutex() && ww_ctx) {
                        ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
                        if (ret)
                                break;
                }

                if (waiter == rt_mutex_top_waiter(lock))
                        owner = rt_mutex_owner(lock);
                else
                        owner = NULL;
                raw_spin_unlock_irq(&lock->wait_lock);

                if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
                        rt_mutex_schedule();

                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
        }

        __set_current_state(TASK_RUNNING);
        return ret;
}
static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
                                             struct rt_mutex_base *lock,
                                             struct rt_mutex_waiter *w)
{
        /*
         * If the result is not -EDEADLOCK or the caller requested
         * deadlock detection, nothing to do here.
         */
        if (res != -EDEADLOCK || detect_deadlock)
                return;

        if (build_ww_mutex() && w->ww_ctx)
                return;

        raw_spin_unlock_irq(&lock->wait_lock);

        WARN(1, "rtmutex deadlock detected\n");

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                rt_mutex_schedule();
        }
}
/**
 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
 * @lock:	The rtmutex to block lock
 * @ww_ctx:	WW mutex context pointer
 * @state:	The task state for sleeping
 * @chwalk:	Indicator whether full or partial chainwalk is requested
 * @waiter:	Initializer waiter for blocking
 * @wake_q:	The wake_q to wake tasks after we release the wait_lock
 */
static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
                                       struct ww_acquire_ctx *ww_ctx,
                                       unsigned int state,
                                       enum rtmutex_chainwalk chwalk,
                                       struct rt_mutex_waiter *waiter,
                                       struct wake_q_head *wake_q)
{
        struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
        struct ww_mutex *ww = ww_container_of(rtm);
        int ret;

        lockdep_assert_held(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                if (build_ww_mutex() && ww_ctx) {
                        __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
                        ww_mutex_lock_acquired(ww, ww_ctx);
                }
                return 0;
        }

        set_current_state(state);

        trace_contention_begin(lock, LCB_F_RT);

        ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q);
        if (likely(!ret))
                ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);

        if (likely(!ret)) {
                /* acquired the lock */
                if (build_ww_mutex() && ww_ctx) {
                        if (!ww_ctx->is_wait_die)
                                __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
                        ww_mutex_lock_acquired(ww, ww_ctx);
                }
        } else {
                __set_current_state(TASK_RUNNING);
                remove_waiter(lock, waiter);
                rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
        }

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock, true);

        trace_contention_end(lock, ret);

        return ret;
}
static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
                                             struct ww_acquire_ctx *ww_ctx,
                                             unsigned int state,
                                             struct wake_q_head *wake_q)
{
        struct rt_mutex_waiter waiter;
        int ret;

        rt_mutex_init_waiter(&waiter);
        waiter.ww_ctx = ww_ctx;

        ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
                                  &waiter, wake_q);

        debug_rt_mutex_free_waiter(&waiter);
        return ret;
}
/**
 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
 * @lock:	The rtmutex to block lock
 * @ww_ctx:	WW mutex context pointer
 * @state:	The task state for sleeping
 */
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
                                     struct ww_acquire_ctx *ww_ctx,
                                     unsigned int state)
{
        DEFINE_WAKE_Q(wake_q);
        unsigned long flags;
        int ret;

        /*
         * Do all pre-schedule work here, before we queue a waiter and invoke
         * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
         * otherwise recurse back into task_blocks_on_rt_mutex() through
         * rtlock_slowlock() and will then enqueue a second waiter for this
         * same task and things get really confusing real fast.
         */
        rt_mutex_pre_schedule();

        /*
         * Technically we could use raw_spin_[un]lock_irq() here, but this can
         * be called in early boot if the cmpxchg() fast path is disabled
         * (debug, no architecture support). In this case we will acquire the
         * rtmutex with lock->wait_lock held. But we cannot unconditionally
         * enable interrupts in that early boot case. So we need to use the
         * irqsave/restore variants.
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        wake_up_q(&wake_q);
        rt_mutex_post_schedule();

        return ret;
}
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
                                           unsigned int state)
{
        lockdep_assert(!current->pi_blocked_on);

        if (likely(rt_mutex_try_acquire(lock)))
                return 0;

        return rt_mutex_slowlock(lock, NULL, state);
}
#endif /* RT_MUTEX_BUILD_MUTEX */
#ifdef RT_MUTEX_BUILD_SPINLOCKS
/*
 * Functions required for spin/rw_lock substitution on RT kernels
 */

/**
 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
 * @lock:	The underlying RT mutex
 * @wake_q:	The wake_q to wake tasks after we release the wait_lock
 */
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
                                           struct wake_q_head *wake_q)
        __releases(&lock->wait_lock) __acquires(&lock->wait_lock)
{
        struct rt_mutex_waiter waiter;
        struct task_struct *owner;

        lockdep_assert_held(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, current, NULL))
                return;

        rt_mutex_init_rtlock_waiter(&waiter);

        /* Save current state and set state to TASK_RTLOCK_WAIT */
        current_save_and_set_rtlock_wait_state();

        trace_contention_begin(lock, LCB_F_RT);

        task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK, wake_q);

        for (;;) {
                /* Try to acquire the lock again */
                if (try_to_take_rt_mutex(lock, current, &waiter))
                        break;

                if (&waiter == rt_mutex_top_waiter(lock))
                        owner = rt_mutex_owner(lock);
                else
                        owner = NULL;
                raw_spin_unlock_irq(&lock->wait_lock);
                wake_up_q(wake_q);
                wake_q_init(wake_q);

                if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
                        schedule_rtlock();

                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(TASK_RTLOCK_WAIT);
        }

        /* Restore the task state */
        current_restore_rtlock_saved_state();

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally.
         * We might have to fix that up:
         */
        fixup_rt_mutex_waiters(lock, true);
        debug_rt_mutex_free_waiter(&waiter);

        trace_contention_end(lock, 0);
}
static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&lock->wait_lock, flags);
        rtlock_slowlock_locked(lock, &wake_q);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        wake_up_q(&wake_q);
}
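/*
 * Illustrative sketch, not part of the upstream file: how the slow path above
 * is typically reached. A caller first attempts the lockless cmpxchg fast path
 * and only falls back to rtlock_slowlock() on contention. The function name is
 * made up for this example only.
 */
static __always_inline void rtlock_lock_sketch(struct rt_mutex_base *rtm)
{
        if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
                rtlock_slowlock(rtm);
}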
#endif /* RT_MUTEX_BUILD_SPINLOCKS */