//===-- sanitizer_mutex.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_mutex.h"

#include "sanitizer_common.h"

namespace __sanitizer {

void StaticSpinMutex::LockSlow() {
  for (int i = 0;; i++) {
    if (i < 100)
      proc_yield(1);
    else
      internal_sched_yield();
    // Test-and-test-and-set: re-check the state with a relaxed load first so
    // that spinning threads don't bounce the cache line between CPUs.
    if (atomic_load(&state_, memory_order_relaxed) == 0 &&
        atomic_exchange(&state_, 1, memory_order_acquire) == 0)
      return;
  }
}

void Semaphore::Wait() {
  u32 count = atomic_load(&state_, memory_order_relaxed);
  for (;;) {
    if (count == 0) {
      // Sleep until Post() makes the count non-zero.
      FutexWait(&state_, 0);
      count = atomic_load(&state_, memory_order_relaxed);
      continue;
    }
    // On failure the CAS reloads the current value into count, so just retry.
    if (atomic_compare_exchange_weak(&state_, &count, count - 1,
                                     memory_order_acquire))
      break;
  }
}

void Semaphore::Post(u32 count) {
  CHECK_NE(count, 0);
  atomic_fetch_add(&state_, count, memory_order_release);
  FutexWake(&state_, count);
}

#if SANITIZER_CHECK_DEADLOCKS
// An empty mutex meta table; it effectively disables deadlock detection.
// Each tool can override the table to define its own mutex hierarchy and
// enable deadlock detection.
// The table defines a static mutex type hierarchy (what mutex types can be
// locked under what mutex types). The table is checked to be acyclic, and then
// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
// The checking happens on mutex types rather than on individual mutex
// instances, because doing it on instances would significantly complicate the
// implementation, worsen performance and memory overhead, and is mostly
// unnecessary (we almost never lock multiple mutexes of the same type
// recursively).
static constexpr int kMutexTypeMax = 20;
SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
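// Purely illustrative sketch (the ExampleRegistry/ExampleReport types are
// hypothetical and not defined here): a tool could override the weak
// definition above with something like
//   MutexMeta mutex_meta[kMutexTypeMax] = {
//       {MutexInvalid, "Invalid", {}},
//       {MutexTypeExampleRegistry, "ExampleRegistry", {MutexLeaf}},
//       {MutexTypeExampleReport, "ExampleReport", {MutexTypeExampleRegistry}},
//   };
// Entry t must live at index t. The first example type is a leaf: it can be
// locked under any other type, but no checked mutex may be acquired while it
// is held. The second entry says ExampleRegistry can be locked while
// ExampleReport is held, but not the other way around.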
SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
static StaticSpinMutex mutex_meta_mtx;
static int mutex_type_count = -1;
// Adjacency matrix of what mutexes can be locked under what mutexes.
static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
// Mutex types with MutexMulti mark.
static bool mutex_multi[kMutexTypeMax];

void DebugMutexInit() {
  // Build adjacency matrix.
  bool leaf[kMutexTypeMax];
  internal_memset(&leaf, 0, sizeof(leaf));
  int cnt[kMutexTypeMax];
  internal_memset(&cnt, 0, sizeof(cnt));
  for (int t = 0; t < kMutexTypeMax; t++) {
    mutex_type_count = t;
    if (!mutex_meta[t].name)
      break;
    CHECK_EQ(t, mutex_meta[t].type);
    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
      MutexType z = mutex_meta[t].can_lock[j];
      if (z == MutexInvalid)
        break;  // The rest of the can_lock array is empty.
      if (z == MutexLeaf) {
        CHECK(!leaf[t]);
        leaf[t] = true;
        continue;
      }
      if (z == MutexMulti) {
        mutex_multi[t] = true;
        continue;
      }
      CHECK_LT(z, kMutexTypeMax);
      CHECK(!mutex_can_lock[t][z]);
      mutex_can_lock[t][z] = true;
      cnt[t]++;
    }
  }

  // mutex_type_count == kMutexTypeMax indicates the array is not properly
  // terminated.
  CHECK_LT(mutex_type_count, kMutexTypeMax);
  // Leaf mutexes can be locked under any other non-leaf mutex.
  for (int t = 0; t < mutex_type_count; t++) {
    if (!leaf[t])
      continue;
    CHECK_EQ(cnt[t], 0);  // Leaf mutexes must not declare can_lock edges.
    for (int z = 0; z < mutex_type_count; z++) {
      if (z == MutexInvalid || t == z || leaf[z])
        continue;
      CHECK(!mutex_can_lock[z][t]);
      mutex_can_lock[z][t] = true;
    }
  }
  // Build the transitive closure and check that the graph is acyclic.
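  // trans[i] is a bitset: bit j is set iff a mutex of type j can be locked
  // (directly or, after the closure below, transitively) while a mutex of
  // type i is held. A set diagonal bit after the closure means type i is
  // reachable from itself, i.e. the declared hierarchy contains a cycle.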
  u32 trans[kMutexTypeMax];
  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
                "kMutexTypeMax does not fit into u32, switch to u64");
  internal_memset(&trans, 0, sizeof(trans));
  for (int i = 0; i < mutex_type_count; i++) {
    for (int j = 0; j < mutex_type_count; j++)
      if (mutex_can_lock[i][j])
        trans[i] |= 1 << j;
  }
  // Propagate reachability: if k is reachable from i, then everything
  // reachable from k is reachable from i as well.
  for (int k = 0; k < mutex_type_count; k++) {
    for (int i = 0; i < mutex_type_count; i++) {
      if (trans[i] & (1 << k))
        trans[i] |= trans[k];
    }
  }
  for (int i = 0; i < mutex_type_count; i++) {
    if (trans[i] & (1 << i)) {
      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
      Die();
    }
  }
}

struct InternalDeadlockDetector {
  struct LockDesc {
    u64 seq;
    uptr pc;
    int recursion;
  };
  // 0 - not yet initialized, 1 - checking enabled, -1 - checking disabled.
  int initialized;
  u64 sequence;
  LockDesc locked[kMutexTypeMax];

  void Lock(MutexType type, uptr pc) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    // Find the last locked mutex type.
    // This is the type we will use for hierarchy checks.
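    // Only the most recently acquired lock that is still held is checked
    // against the new type; every earlier lock went through the same check
    // when it was acquired.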
    u64 max_seq = 0;
    MutexType max_idx = MutexInvalid;
    for (int i = 0; i != mutex_type_count; i++) {
      if (locked[i].seq == 0)
        continue;
      CHECK_NE(locked[i].seq, max_seq);
      if (max_seq < locked[i].seq) {
        max_seq = locked[i].seq;
        max_idx = (MutexType)i;
      }
    }
    if (max_idx == type && mutex_multi[type]) {
      // Recursive lock of the same type.
      CHECK_EQ(locked[type].seq, max_seq);
      CHECK(locked[type].pc);
      locked[type].recursion++;
      return;
    }
    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
      Printf("%s: internal deadlock: can't lock %s under %s mutex\n",
             SanitizerToolName, mutex_meta[type].name,
             mutex_meta[max_idx].name);
      PrintMutexPC(locked[max_idx].pc);
      CHECK(0);
    }
    locked[type].seq = ++sequence;
    locked[type].pc = pc;
    locked[type].recursion = 1;
  }

  void Unlock(MutexType type) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    CHECK(locked[type].seq);
    CHECK_GT(locked[type].recursion, 0);
    if (--locked[type].recursion)
      return;  // Still locked recursively.
    locked[type].seq = 0;
    locked[type].pc = 0;
  }

  void CheckNoLocks() {
    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
  }

  bool Initialize(MutexType type) {
    if (type == MutexUnchecked || type == MutexInvalid)
      return false;
    CHECK_GT(type, MutexInvalid);
    if (initialized != 0)
      return initialized > 0;
    // Disable checking on this thread while the hierarchy is being built so
    // that locks acquired inside DebugMutexInit() do not recurse into here.
    initialized = -1;
    SpinMutexLock lock(&mutex_meta_mtx);
    if (mutex_type_count < 0)
      DebugMutexInit();
    initialized = mutex_type_count ? 1 : -1;
    return initialized > 0;
  }
};

static THREADLOCAL InternalDeadlockDetector deadlock_detector;

void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }

void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }

void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }

#endif

}  // namespace __sanitizer