//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires aligned address
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static Mutex mu_for_thread_context;
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  Lock lock(&mu_for_thread_context);
  return new (allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread_safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry =
        new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
    initialized = true;
  }
  return *asan_thread_registry;
}
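
// Note: the registry is constructed with placement new inside the static,
// suitably aligned placeholder above, so creating it performs no dynamic
// allocation and does not depend on the ASan allocator being initialized.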

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);

  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}
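
// On POSIX, TSDDtor runs as the thread-specific-data destructor and is what
// ultimately calls Destroy(); see the comment at the end of ThreadStart below.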

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  bool was_running =
      (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
  if (was_running) {
    if (AsanThread *thread = GetCurrentThread())
      CHECK_EQ(this, thread);
    malloc_storage().CommitBack();
    if (common_flags()->use_sigaltstack)
      UnsetAlternateSignalStack();
    FlushToDeadThreadStats(&stats_);
    // We also clear the shadow on thread destruction because
    // some code may still be executing in later TSD destructors
    // and we don't want it to have any poisoned stack.
    ClearShadowForThreadStackAndTLS();
    DeleteFakeStack(tid);
  } else {
    CHECK_NE(this, GetCurrentThread());
  }
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  if (was_running)
    DTLS_Destroy();
}
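
// Note: UnmapOrDie in Destroy() releases the AsanThread object itself, so
// nothing after that call may touch members; only locals are used past it.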

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is about to die; delete its fake
  // stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old, uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}
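
// Ordering note: StartSwitchFiber publishes next_stack_bottom_/next_stack_top_
// before its release store to stack_switching_, and GetStackBounds below pairs
// that with an acquire load, so a reader that observes stack_switching_ == 1
// also sees the new bounds.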

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: we need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/bottom_. But in that case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not before the
// stack size is known; the procedure also has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1,
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}
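
// If the CAS above fails, some other invocation (for example, a signal handler
// that interrupted this one) is already initializing the fake stack; this call
// then returns nullptr and the caller simply proceeds without a fake frame.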

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), kInvalidTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
    // called from the context of the thread it is initializing, not its
    // parent. Most platforms call AsanThread::Init on the newly-spawned
    // thread, but Fuchsia calls this function from the parent thread. To
    // support that approach, we avoid calling AsyncSignalSafeLazyInitFakeStack
    // here; it will be called by the new thread when it first attempts to
    // access the fake stack.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          (void *)&local);
}

// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

thread_return_t AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
  // the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                       &tls_begin_, &tls_size);
  stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
    FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
  }
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr *)bottom)[2];
    access->frame_descr = (const char *)((uptr *)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
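
  // For a real stack frame, scan the shadow leftwards: first skip bytes that
  // are not left-redzone shadow (the frame's contents), then skip the left
  // redzone itself; the frame header (magic, descriptor, pc) lies just past
  // that redzone.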
  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char *)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
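
  // Scan the shadow leftwards until any stack-redzone magic value is found;
  // the shadow byte just after it is the start of the variable's shadow.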
  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return false;
  return fake_stack->AddrIsInFakeStack((uptr)addr);
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, libc constructor is called _after_ asan_init, and cleans up
      // TSD. Try to figure out if this is still the main thread by the stack
      // address. We are not entirely sure that we have correct main thread
      // limits, so only do this magic on Android, and only if the found thread
      // is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}
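
// LSan treats the ranges reported above as root regions: pointers found in
// [stack_begin, stack_end), the TLS range, and the DTLS blocks keep heap
// objects from being reported as leaks.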

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return;
  __asan::FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return;
  fake_stack->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

ThreadRegistry *GetThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack,
                       (uptr *)bottom_old,
                       (uptr *)size_old);
}
}
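
// A minimal usage sketch for the fiber annotations above (the names current,
// next, and the swapcontext() call stand in for whatever fiber representation
// and context-switch primitive a library actually uses):
//
//   void *fake_stack = nullptr;
//   __sanitizer_start_switch_fiber(&fake_stack, next->stack_base,
//                                  next->stack_size);
//   swapcontext(&current->ctx, &next->ctx);  // now running on next's stack
//   __sanitizer_finish_switch_fiber(fake_stack, &old_bottom, &old_size);
//
// Passing a null first argument to __sanitizer_start_switch_fiber tells ASan
// the current fiber is about to die, so StartSwitchFiber destroys its fake
// stack instead of saving it.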