//===-- sanitizer_deadlock_detector2.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
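
// The detector maintains a global graph in which nodes are mutexes and an
// edge M1->M2 records that some thread acquired M2 while holding M1. A cycle
// in this graph indicates a potential deadlock. The classic lock-order
// inversion is caught once both edges have been observed:
//
//   // Thread 1:            // Thread 2:
//   lock(A);                lock(B);
//   lock(B);  // adds A->B  lock(A);  // adds B->A: cycle A->B->A, reported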
13 #include "sanitizer_deadlock_detector_interface.h"
14 #include "sanitizer_common.h"
15 #include "sanitizer_allocator_internal.h"
16 #include "sanitizer_placement_new.h"
17 #include "sanitizer_mutex.h"
19 #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;
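
// Id pairs a slot in the mutex table with a generation counter (seq), so a
// recycled slot is not confused with the mutex that previously used it.
// Link is a directed edge of the mutex graph: "the mutex with this id was
// acquired while the source mutex was held", together with the acquiring
// thread (tid) and the two acquisition stacks (stk0, stk1).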
struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0) : id(id), seq(seq) {}
};

struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id), seq(seq), tid(tid), stk0(s0), stk1(s1) {}
};
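
// DDPhysicalThread holds per-OS-thread scratch state for the cycle search
// plus the pending report; DDLogicalThread tracks the mutexes currently held
// by one user thread; MutexState is a node of the graph with its outgoing
// links.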
struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};

struct MutexState {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};
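
// The detector itself: a two-level table of MutexState (the graph) plus id
// allocation state shared by all threads.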
struct DD final : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread *CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  MutexState *getMutex(u32 id);
  u32 getMutexId(MutexState *m);

  DDFlags flags;

  MutexState *mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen = 0;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }

DDPhysicalThread *DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread *)MmapOrDie(
      sizeof(DDPhysicalThread), "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread *DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt =
      (DDLogicalThread *)InternalAlloc(sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}
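
// The mutex table is a two-level array: an id maps to an L1 slot and an
// offset within a lazily mmap-ed L2 block, so MutexState pointers remain
// stable while new ids are allocated.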
MutexState *DD::getMutex(u32 id) { return &mutex[id / kL2Size][id % kL2Size]; }

u32 DD::getMutexId(MutexState *m) {
  for (int i = 0; i < kL1Size; i++) {
    MutexState *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}
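
// Ids are recycled through free_id; otherwise a fresh id is taken from
// id_gen, mmap-ing a new L2 block of the mutex table whenever a block
// boundary is crossed.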
u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(
          kL2Size * sizeof(MutexState), "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}
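
// Called before a blocking acquisition: records m in lt->locked, adds an
// edge to m from every mutex the thread already holds, and runs a cycle
// check if any edge was added or refreshed.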
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  MutexState *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    MutexState *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}
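
// Called after a successful acquisition: maintains the owner/recursion
// fields; for trylock it also records m in lt->locked, since no
// MutexBeforeLock call preceded it.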
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}
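
// Called before releasing: handles recursive unlocks, clears the owner, and
// removes m from lt->locked by swapping in the last element.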
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (cb->lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}
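
// Called on mutex destruction: drops m from lt->locked, invalidates the
// descriptor by bumping seq (so stale links are ignored) and recycles the
// id.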
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}
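
// Iterative depth-first search for a cycle that returns to m. pt->pending is
// the work stack; a Link(kEndId) sentinel marks where the current path must
// be popped. Links whose seq no longer matches the target mutex are stale
// (the mutex was destroyed and its id recycled) and are skipped.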
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    MutexState *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // MutexState *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}
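
// Converts the discovered path into a DDReport: one loop entry per edge,
// linking each mutex to its predecessor in the cycle.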
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2