// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
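
/*
 * Note on the owner word (see mutex.h): lock->owner holds the task_struct
 * pointer of the owning task, with the low bits (MUTEX_FLAG_WAITERS,
 * MUTEX_FLAG_HANDOFF, MUTEX_FLAG_PICKUP, together MUTEX_FLAGS) borrowed for
 * state. That is why __owner_task() and __owner_flags() mask the same value
 * from opposite ends.
 */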
/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break; /* we can't steal, must wait */
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif
static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}
/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}
/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
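
/*
 * Illustrative usage sketch (not part of this file): a minimal caller that
 * serializes updates to a shared counter. The example_lock/example_count
 * names are hypothetical.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static unsigned long example_count;
 *
 *	static void example_update(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;		// critical section
 *		mutex_unlock(&example_lock);
 *	}
 */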
#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * not invalid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}
/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. And we already
		 * disabled preemption, which is equivalent to the RCU
		 * read-side critical section in the optimistic spinning
		 * code. Thus the task_struct structure won't go away during
		 * the spinning period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issues.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to the RCU
	 * read-side critical section in the optimistic spinning code. Thus
	 * the task_struct structure won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
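
/*
 * Illustrative usage sketch (not part of this file): acquiring two w/w
 * mutexes under one acquire context, backing off and retrying on -EDEADLK
 * as described in Documentation/locking/ww-mutex-design.rst. The
 * example_ww_class and example_lock_pair() names are hypothetical.
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		struct ww_mutex *first = a, *second = b;
 *
 *		ww_acquire_init(&ctx, &example_ww_class);
 *		for (;;) {
 *			ww_mutex_lock(first, &ctx);
 *			if (ww_mutex_lock(second, &ctx) != -EDEADLK)
 *				break;
 *			ww_mutex_unlock(first);	// back off, retry with the
 *			swap(first, second);	// contended lock taken first
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		// ... both objects are now held ...
 *
 *		ww_mutex_unlock(a);
 *		ww_mutex_unlock(b);
 *		ww_acquire_fini(&ctx);
 *	}
 */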
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	DEFINE_WAKE_Q(wake_q);
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	unsigned long flags;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		/* Make sure we do wakeups before calling schedule */
		wake_up_q(&wake_q);
		wake_q_init(&wake_q);

		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	wake_up_q(&wake_q);
	preempt_enable();
	return ret;
}
static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}
/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if a @ww_ctx is
 * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
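
/*
 * Illustrative usage sketch (not part of this file): opportunistically grab
 * an object's w/w lock without an acquire context and let the caller fall
 * back to a slow path on contention. struct example_obj and example_poke()
 * are hypothetical.
 *
 *	static bool example_poke(struct example_obj *obj)
 *	{
 *		if (!ww_mutex_trylock(&obj->lock, NULL))
 *			return false;		// contended, caller retries later
 *
 *		// ... touch obj ...
 *		ww_mutex_unlock(&obj->lock);
 *		return true;
 *	}
 */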
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
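
/*
 * Illustrative usage sketch (not part of this file): when two locks of the
 * same lock class are nested by design, the inner one is taken with an
 * explicit lockdep subclass so the nesting is not reported as a potential
 * self-deadlock. The parent/child naming is hypothetical.
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	// ... work on parent and child ...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */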
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned int tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;
	unsigned long flags;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
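
/*
 * Illustrative usage sketch (not part of this file): a sleeping acquisition
 * that lets the caller bail out cleanly when a signal arrives. The
 * example_do_work() helper is hypothetical.
 *
 *	static int example_do_work(struct mutex *lock)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(lock);
 *		if (ret)
 *			return ret;		// -EINTR, nothing is held
 *
 *		// ... critical section ...
 *		mutex_unlock(lock);
 *		return 0;
 *	}
 */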
/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
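
/*
 * Illustrative usage sketch (not part of this file): note the spin_trylock()
 * convention, 1 (true) means the lock was taken. The example_try_update()
 * helper is hypothetical.
 *
 *	static bool example_try_update(struct mutex *lock)
 *	{
 *		if (!mutex_trylock(lock))
 *			return false;		// contended, try again later
 *
 *		// ... critical section ...
 *		mutex_unlock(lock);
 *		return true;
 *	}
 */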
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
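
/*
 * Illustrative usage sketch (not part of this file): tearing down a
 * refcounted object whose removal must be serialized by a mutex. The
 * example_put() helper and the struct layout are hypothetical.
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refs, &obj->parent->lock))
 *			return;			// not the last reference
 *
 *		// refcount hit zero and the mutex is held
 *		list_del(&obj->node);
 *		mutex_unlock(&obj->parent->lock);
 *		kfree(obj);
 *	}
 */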