//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//
#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"
volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#endif
}
#endif
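
// Allocates a trace part for thr. Once the thread has reached its budget
// (Trace::kMinParts + flags()->history_size) or there is an excess of
// finished parts, a part is taken from the recycle queue instead of
// mmap'ing a fresh one.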
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}
static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}
void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}
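
// Performs a global reset of the state: detaches trace parts from all
// registered threads, re-initializes the slot queue and epochs, and remaps
// (zeroes) the shadow and resets the meta shadow clocks. Runs under the
// thread registry and slot mutexes; the per-slot mutexes are held by the
// caller (DoReset).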
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tid's can still be stored in some aux places
    // (e.g. tid of thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
#  if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
#  endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}
// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }
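
// Pops a non-exhausted slot from the slot queue and returns it locked.
// If all slots are exhausted (reached kEpochLast), triggers DoReset
// and retries.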
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}
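
// Attaches the thread to a free slot: bumps the slot epoch and seeds the
// thread's fast_state and vector clock from it. The slot remains locked
// on return.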
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}
static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in an unlikely event
        // the thread is preempted right after it acquired the slot
        // in ThreadStart and did not trace any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}
void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}
void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}
Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr *>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr *>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}
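
// Appends a memory usage snapshot to the file configured by the
// profile_memory flag (if any).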
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.AppendF("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}
static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // destruction).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
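
// Releases the shadow pages corresponding to the application range
// [addr, addr+size) back to the OS.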
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}
// We call UnmapShadow before the actual munmap, at that point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to understand if the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try to not
// crash as the failure mode is very confusing (paging fault inside of the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of app ranges,
  // end of the region belongs to the same region.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
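
// Maps shadow and meta shadow for the application range [addr, addr+size).
// The first call covers data+bss; subsequent calls extend the mapping to
// cover the heap.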
void MapShadow(uptr addr, uptr size) {
  // Ensure thread registry lock held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
      ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}

void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}
void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}
int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

  __tsan_test_only_on_fork();
}
static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }
void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
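
// Captures the current shadow (call) stack, optionally topped with pc,
// and interns it in the stack depot.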
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
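
// Skips the alignment gap in the current trace part by filling it with
// NopEvent's. Returns true if usable space remains in the part, and false
// if the part is really exhausted.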
static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvent's.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}
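
// Slow path of TraceSwitchPart: allocates and installs a fresh trace part,
// then replays the current call stack and mutex set into it so that the
// part can be decoded on its own.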
void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may be not queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock in the beginning
    // of the function and the slot was at kEpochLast - 1, so after increment
    // in SlotAttachAndLock it become kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}
void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
    thr->mop_ignore_set.Reset();
  }
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
}
bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

}  // namespace __tsan
#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif