//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//
//===----------------------------------------------------------------------===//
#include "asan_thread.h"

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

static ThreadRegistry *asan_thread_registry;
static ThreadArgRetval *thread_data;

static Mutex mu_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  Lock lock(&mu_for_thread_context);
  return new (GetGlobalLowLevelAllocator()) AsanThreadContext(tid);
}

static void InitThreads() {
  static bool initialized;
  // Don't worry about thread_safety - this should be called when there is
  // a single thread.
  if (LIKELY(initialized))
    return;
  // Never reuse ASan threads: we store pointer to AsanThreadContext
  // in TSD and can't reliably tell when no more TSD destructors will
  // be called. It would be wrong to reuse AsanThreadContext for another
  // thread before all TSD destructors will be called for it.

  // MIPS requires aligned address
  static ALIGNED(alignof(
      ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
  static ALIGNED(alignof(
      ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];

  asan_thread_registry =
      new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
  thread_data = new (thread_data_placeholder) ThreadArgRetval();

  initialized = true;
}

ThreadRegistry &asanThreadRegistry() {
  InitThreads();
  return *asan_thread_registry;
}

ThreadArgRetval &asanThreadArgRetval() {
  InitThreads();
  return *thread_data;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
  if (data_size) {
    uptr available_size = (uptr)thread + size - (uptr)(thread->start_data_);
    CHECK_LE(data_size, available_size);
    internal_memcpy(thread->start_data_, start_data, data_size);
  }
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);

  return thread;
}
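
// Illustrative sketch only: how a thread-creation interceptor might use
// Create()/GetStartData() to hand the user's start routine and argument to
// the child thread. The struct and helper names below are hypothetical and
// not part of this file.
//
//   struct ThreadStartParams { void *(*routine)(void *); void *arg; };
//   ...
//   ThreadStartParams params = {user_routine, user_arg};
//   AsanThread *t = AsanThread::Create(&params, sizeof(params), parent_tid,
//                                      &stack, /*detached=*/false);
//   // Later, on the child thread:
//   ThreadStartParams params;
//   t->GetStartData(&params, sizeof(params));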

void AsanThread::GetStartData(void *out, uptr out_size) const {
  internal_memcpy(out, start_data_, out_size);
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  bool was_running =
      (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
  if (was_running) {
    if (AsanThread *thread = GetCurrentThread())
      CHECK_EQ(this, thread);
    malloc_storage().CommitBack();
    if (common_flags()->use_sigaltstack)
      UnsetAlternateSignalStack();
    FlushToDeadThreadStats(&stats_);
    // We also clear the shadow on thread destruction because
    // some code may still be executing in later TSD destructors
    // and we don't want it to have any poisoned stack.
    ClearShadowForThreadStackAndTLS();
    DeleteFakeStack(tid);
  } else {
    CHECK_NE(this, GetCurrentThread());
  }
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  if (was_running)
    DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // if fake_stack_save is null, the fiber will die, delete the fakestack
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}
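
// The release stores to stack_switching_ in StartSwitchFiber and
// FinishSwitchFiber above pair with the acquire load in GetStackBounds()
// below: once a reader observes that no switch is in progress, it also
// observes the stack_bottom_/stack_top_ values written before that store.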

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_)
      return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check next stack first, because FinishSwitchFiber
  // may be in process of overwriting stack_top_/bottom_. But in such case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() { return GetStackBounds().top; }

uptr AsanThread::stack_bottom() { return GetStackBounds().bottom; }

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1,
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}
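
// For example, a thread with an 8 MiB stack gets
// stack_size_log = Log2(RoundUpToPowerOfTwo(8 MiB)) = 23, which is then
// clamped into [min_uar_stack_size_log, max_uar_stack_size_log] before
// FakeStack::Create() is called with the resulting size log.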

void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), kInvalidTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
    // called from the context of the thread it is initializing, not its
    // parent. Most platforms call AsanThread::Init on the newly-spawned
    // thread, but Fuchsia calls this function from the parent thread. To
    // support that approach, we avoid calling AsyncSignalSafeLazyInitFakeStack
    // here; it will be called by the new thread when it first attempts to
    // access the fake stack.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          (void *)&local);
}

// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

void AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack)
    SetAlternateSignalStack();
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (which see, above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                       &tls_begin_, &tls_size);
  stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
  stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
    FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
  }
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr *)bottom)[2];
    access->frame_descr = (const char *)((uptr *)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char *)ptr[1];
  return true;
}
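
// As used above, an instrumented stack frame starts with a three-word header:
// word [0] holds kCurrentStackFrameMagic, word [1] points to the frame
// description string, and word [2] holds the function PC. Both the real-stack
// and fake-stack lookups read the frame header through that layout.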

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return false;
  return fake_stack->AddrIsInFakeStack((uptr)addr);
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, libc constructor is called _after_ asan_init, and cleans up
      // TSD. Try to figure out if this is still the main thread by the stack
      // address. We are not entirely sure that we have correct main thread
      // limits, so only do this magic on Android, and only if the found thread
      // is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context)
    return nullptr;
  return context->thread;
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

void LockThreads() {
  __asan::asanThreadRegistry().Lock();
  __asan::asanThreadArgRetval().Lock();
}

void UnlockThreads() {
  __asan::asanThreadArgRetval().Unlock();
  __asan::asanThreadRegistry().Unlock();
}

static ThreadRegistry *GetAsanThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return;
  __asan::FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return;

  fake_stack->ForEachFakeFrame(
      [](uptr begin, uptr end, void *arg) {
        reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
            {begin, end});
      },
      ranges);
}

void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *arg) {
        GetThreadExtraStackRangesLocked(
            tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
      },
      ranges);
}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
}

void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *threads) {
        if (tctx->status == ThreadStatusRunning)
          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
              tctx->os_id);
      },
      threads);
}

}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *fakestack, const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack, (uptr *)bottom_old,
                       (uptr *)size_old);
}
}
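
// Sketch of the intended usage of the two entry points above (the fiber
// library call below is illustrative, not part of this interface): before
// jumping to another stack, call __sanitizer_start_switch_fiber() with the
// target stack's bottom and size; right after the jump, call
// __sanitizer_finish_switch_fiber() on the destination stack to restore the
// fake stack and learn the previous stack's bounds. Pass nullptr as the save
// slot only when the old fiber is about to be destroyed.
//
//   void *fake_stack = nullptr;
//   const void *old_bottom = nullptr;
//   uptr old_size = 0;
//   __sanitizer_start_switch_fiber(&fake_stack, target_stack_bottom,
//                                  target_stack_size);
//   swap_context(&current_fiber, target_fiber);  // illustrative fiber switch
//   __sanitizer_finish_switch_fiber(fake_stack, &old_bottom, &old_size);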