//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"
namespace __sanitizer {

class SANITIZER_MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() SANITIZER_ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() SANITIZER_RELEASE() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};
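// Usage sketch (illustrative; the counter and its mutex are hypothetical).
// A global StaticSpinMutex lives in zero-initialized storage, so it can be
// used without calling Init():
//
//   static StaticSpinMutex g_counter_mu;
//   static int g_counter;
//
//   void IncrementCounter() {
//     SpinMutexLock l(&g_counter_mu);  // RAII typedef defined further below
//     g_counter++;
//   }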
class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};
// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
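// A minimal park/unpark sketch (illustrative; `sem` and both functions are
// hypothetical). One thread blocks in Wait() until another calls Post():
//
//   Semaphore sem;
//
//   void Consumer() { sem.Wait(); /* proceed once posted */ }
//   void Producer() { sem.Post(); }
//
// Post(count) can release several waiters at once; the Mutex below relies on
// this to wake all blocked readers with a single call.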
typedef int MutexType;
enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -2,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables, they are not usable early on during process init
// when `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)
#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table fixes what mutexes can be locked under what mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
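// For instance, a tool could encode its lock order with entries like the
// following (MutexTypeFoo/MutexTypeBar are hypothetical types, mirroring the
// comment above; the table name is illustrative):
//
//   MutexMeta mutex_meta[] = {
//       {MutexTypeFoo, "Foo", {MutexTypeBar}},  // Bar can be taken under Foo
//       {MutexTypeBar, "Bar", {MutexLeaf}},     // Bar is a leaf mutex
//   };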
class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
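// Usage sketch (illustrative): a runtime entry point can assert on its way
// back to user code that it released every mutex it acquired. The call
// compiles to nothing unless SANITIZER_CHECK_DEADLOCKS is enabled:
//
//   void SomeRuntimeEntryPoint() {
//     // ... acquire and release internal mutexes ...
//     CheckedMutex::CheckNoLocks();
//   }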
// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}
  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Another thread is already spin-waiting; keep spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }
  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    u64 state = atomic_load_relaxed(&state_);
    for (;;) {
      if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
        return false;
      // The mutex is not read-/write-locked, try to lock.
      if (LIKELY(atomic_compare_exchange_weak(
              &state_, &state, state | kWriterLock, memory_order_acquire))) {
        CheckedMutex::Lock();
        return true;
      }
    }
  }
  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }
  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Another thread is already spin-waiting; keep spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }
  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }
  // This function does not check that the calling thread is the thread that
  // owns the mutex. Such a check, while more strictly correct, causes
  // problems in cases like StopTheWorld, where a parent thread owns the mutex
  // but a child checks that it is locked. Rather than maintaining complex
  // state to work around those situations, the check only verifies that the
  // mutex is held by someone.
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }
 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 3 flags:
  //  - writer lock,
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting,
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake-up, both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in the presence
  // of other blocked threads.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;
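  // Layout of the 64-bit state word implied by the constants above:
  //   bits  0..19  readers holding the lock (kReaderLockMask)
  //   bits 20..39  waiting readers (kWaitingReaderMask)
  //   bits 40..59  waiting writers (kWaitingWriterMask)
  //   bit  60      kWriterLock
  //   bit  61      kWriterSpinWait
  //   bit  62      kReaderSpinWait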
  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};
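// OS-specific thread-parking primitives, implemented per platform. The
// intended semantics follow the usual futex contract (an assumption stated
// here for clarity; the authoritative behavior is in the per-OS sources):
// FutexWait blocks the caller only while *p still equals cmp, and FutexWake
// wakes up to count threads blocked on p.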
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};
template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};
template <typename MutexType>
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      SANITIZER_ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
    else
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    if (write_)
      mu_->Unlock();
    else
      mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
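// Usage sketch for the scoped typedefs (illustrative; `registry_mu` and the
// functions are hypothetical):
//
//   Mutex registry_mu;
//
//   void UpdateRegistry() {
//     Lock l(&registry_mu);      // exclusive: Lock()/Unlock() via RAII
//     // mutate shared state
//   }
//
//   void QueryRegistry() {
//     ReadLock l(&registry_mu);  // shared: ReadLock()/ReadUnlock() via RAII
//     // read shared state
//   }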
}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H