//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_fd.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_sync.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  (void)suppressed;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

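// Drops the outermost frames (main and its callers, our internal thread start
// routine, or the Go runtime entry) from a symbolized stack so that reports
// end at user code.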
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init, .preinit_array and main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

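// Loads a stack trace from the stack depot by id and symbolizes it.
// Returns null for an empty id or trace.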
ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

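// Symbolizes a raw stack trace into a linked list of report frames.
// Return addresses are shifted to the preceding call instruction (except for
// external PCs), and the frames above main() are stripped.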
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}

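// Decides whether a report of the given type should be produced at all,
// based on runtime flags and the post-fork suppression state of the thread.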
bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locking in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}

ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

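// Records one of the racing memory accesses in the report: its address, size,
// access kind, stack, and the set of mutexes held at the time of the access.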
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {
  uptr addr0, size;
  AccessType typ;
  s.GetAccess(&addr0, &size, &typ);
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = tid;
  mop->addr = addr + addr0;
  mop->size = size;
  mop->write = !(typ & kAccessRead);
  mop->atomic = typ & kAccessAtomic;
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    int id = this->AddMutex(d.addr, d.stack_id);
    ReportMopMutex mtx = {id, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  auto *rt = New<ReportThread>();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
          IsInStackOrTls, (void *)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
    AddThread(tctx, suppressable);
#endif
}

int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->addr == addr)
      return rep_->mutexes[i]->id;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = rep_->mutexes.Size() - 1;
  rm->addr = addr;
  rm->stack = SymbolizeStackId(creation_stack_id);
  return rm->id;
}

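// Attaches a description of the memory at addr to the report: a file
// descriptor, a heap block, a thread's stack or TLS, or a global,
// whichever matches.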
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  bool closed = false;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack, &closed)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd_closed = closed;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    AddThread(creat_tid);
    return;
  }
  MBlock *b = 0;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != 0) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = block_begin;
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    AddThread(b->tid);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    f(kFreeSid, kEpochOver, nullptr);  // notify about part start
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    DCHECK(part);
  }
}

static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2("    MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
  Tid tid = kInvalidTid;
  // Need to lock the slot mutex as it protects slot->journal.
  slot->mtx.CheckLocked();
  for (uptr i = 0; i < slot->journal.Size(); i++) {
    DPrintf2("  journal: epoch=%d tid=%d\n",
             static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
    if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
      tid = slot->journal[i].tid;
      break;
    }
  }
  if (tid == kInvalidTid)
    return false;
  *ptid = tid;
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part) {
      DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
      return false;
    }
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
           trace->parts.Front(), last_part, last_pos);
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        if (evp == nullptr) {
          // Each trace part is self-consistent, so we reset state.
          stack.Resize(0);
          mset->Reset();
          prev_pc = 0;
          return;
        }
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2("  Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            // We don't log pathologically large stacks in each part,
            // if the stack was truncated we can have more func exits than
            // entries.
            if (stack.Size())
              stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2("  AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2("  Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2("  Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of trace
            // part as initial mutex set (are not real).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2("  Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}

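// Two racy stack pairs compare equal regardless of the order of the stacks.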
bool RacyStacks::operator==(const RacyStacks &other) const {
  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    return true;
  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    return true;
  return false;
}

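// Report deduplication: a race whose pair of stack hashes has already been
// reported is suppressed. The hashes are kept in ctx->racy_stacks and are
// guarded by racy_mtx.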
static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

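// Matches the finished report against suppressions, notifies the
// OnReport/__tsan_on_report hooks and prints it.
// Returns true if the report was actually emitted.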
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here, we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  bool suppressed = OnReport(rep, pc_or_addr != 0);
  if (suppressed) {
    thr->current_report = nullptr;
    return false;
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

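// Returns true if the old access matches the last race for which stack
// restoration failed, so the same spurious race is not re-examined.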
static bool SpuriousRace(Shadow old) {
  Shadow last(LoadShadow(&ctx->last_spurious_race));
  return last.sid() == old.sid() && last.epoch() == old.epoch();
}

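// Entry point for data race reporting: restores the other (historical) access
// from the trace, classifies the report type, and builds and emits a report
// with both accesses, the involved threads and the memory location.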
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  uptr addr = ShadowToMem(shadow_mem);
  DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
  if (!ShouldReport(thr, ReportTypeRace))
    return;
  uptr addr_off0, size0;
  cur.GetAccess(&addr_off0, &size0, nullptr);
  uptr addr_off1, size1, typ1;
  old.GetAccess(&addr_off1, &size1, &typ1);
  if (!flags()->report_atomic_races &&
      ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
      !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
    return;
  if (SpuriousRace(old))
    return;

  const uptr kMop = 2;
  Shadow s[kMop] = {cur, old};
  uptr addr0 = addr + addr_off0;
  uptr addr1 = addr + addr_off1;
  uptr end0 = addr0 + size0;
  uptr end1 = addr1 + size1;
  uptr addr_min = min(addr0, addr1);
  uptr addr_max = max(end0, end1);
  if (IsExpectedReport(addr_min, addr_max - addr_min))
    return;

  ReportType rep_typ = ReportTypeRace;
  if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
    rep_typ = ReportTypeVptrUseAfterFree;
  else if (typ0 & kAccessVptr)
    rep_typ = ReportTypeVptrRace;
  else if (typ1 & kAccessFree)
    rep_typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, rep_typ, addr))
    return;

  VarSizeStackTrace traces[kMop];
  Tid tids[kMop] = {thr->tid, kInvalidTid};
  uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};

  ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, rep_typ, traces[0]))
    return;

  DynamicMutexSet mset1;
  MutexSet *mset[kMop] = {&thr->mset, mset1};

  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  if (SpuriousRace(old))
    return;
  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
    StoreShadow(&ctx->last_spurious_race, old.raw());
    return;
  }

  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      rep_typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ScopedReport rep(rep_typ, tag);
  for (uptr i = 0; i < kMop; i++)
    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);

  for (uptr i = 0; i < kMop; i++) {
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry.GetThreadLocked(tids[i]));
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

  if (flags()->print_full_thread_history) {
    const ReportDesc *rep_desc = rep.GetReport();
    for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
      Tid parent_tid = rep_desc->threads[i]->parent_tid;
      if (parent_tid == kMainTid || parent_tid == kInvalidTid)
        continue;
      ThreadContext *parent_tctx = static_cast<ThreadContext *>(
          ctx->thread_registry.GetThreadLocked(parent_tid));
      rep.AddThread(parent_tctx);
    }
  }

#if !SANITIZER_GO
  if (!((typ0 | typ1) & kAccessFree) &&
      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
    rep.AddSleep(thr->last_sleep_stack_id);
#endif
  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but
// tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  auto *ptrace = New<BufferedStackTrace>();
  ptrace->Unwind(pc, bp, nullptr, false);
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"