//===-- asan_thread.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_thread.h"

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}
static ThreadRegistry *asan_thread_registry;
static ThreadArgRetval *thread_data;

static Mutex mu_for_thread_context;
// TODO(leonardchan@): It should be possible to make LowLevelAllocator
// threadsafe and consolidate this one into the GlobalLowLevelAllocator.
// We should be able to do something similar to what's in
// sanitizer_stack_store.cpp.
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  Lock lock(&mu_for_thread_context);
  return new (allocator_for_thread_context) AsanThreadContext(tid);
}
static void InitThreads() {
  static bool initialized;
  // Don't worry about thread_safety - this should be called when there is
  // a single thread.
  if (LIKELY(initialized))
    return;
  // Never reuse ASan threads: we store pointer to AsanThreadContext
  // in TSD and can't reliably tell when no more TSD destructors will
  // be called. It would be wrong to reuse AsanThreadContext for another
  // thread before all TSD destructors will be called for it.

  // MIPS requires aligned address
  alignas(alignof(ThreadRegistry)) static char
      thread_registry_placeholder[sizeof(ThreadRegistry)];
  alignas(alignof(ThreadArgRetval)) static char
      thread_data_placeholder[sizeof(ThreadArgRetval)];

  asan_thread_registry =
      new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
  thread_data = new (thread_data_placeholder) ThreadArgRetval();

  initialized = true;
}
ThreadRegistry &asanThreadRegistry() {
  InitThreads();
  return *asan_thread_registry;
}

ThreadArgRetval &asanThreadArgRetval() {
  InitThreads();
  return *thread_data;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}
// AsanThread implementation.

AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
  if (data_size) {
    uptr availible_size = (uptr)thread + size - (uptr)(thread->start_data_);
    CHECK_LE(data_size, availible_size);
    internal_memcpy(thread->start_data_, start_data, data_size);
  }
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);

  return thread;
}

void AsanThread::GetStartData(void *out, uptr out_size) const {
  internal_memcpy(out, start_data_, out_size);
}
void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}
void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  bool was_running =
      (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
  if (was_running) {
    if (AsanThread *thread = GetCurrentThread())
      CHECK_EQ(this, thread);
    malloc_storage().CommitBack();
    if (common_flags()->use_sigaltstack)
      UnsetAlternateSignalStack();
    FlushToDeadThreadStats(&stats_);
    // We also clear the shadow on thread destruction because
    // some code may still be executing in later TSD destructors
    // and we don't want it to have any poisoned stack.
    ClearShadowForThreadStackAndTLS();
    DeleteFakeStack(tid);
  } else {
    CHECK_NE(this, GetCurrentThread());
  }
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
}
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // if fake_stack_save is null, the fiber will die, delete the fakestack
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}
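
// The acquire load of stack_switching_ below pairs with the release stores in
// StartSwitchFiber and FinishSwitchFiber: once a switch (or its completion) is
// observed, the next_stack_*/stack_* values written before the corresponding
// store are visible here.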
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_)
      return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check next stack first, because FinishSwitchFiber
  // may be in process of overwriting stack_top_/bottom_. But in such case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}
uptr AsanThread::stack_top() { return GetStackBounds().top; }

uptr AsanThread::stack_bottom() { return GetStackBounds().bottom; }

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}
// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1,
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    DCHECK_EQ(GetCurrentThread(), this);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}
void AsanThread::Init(const InitOptions *options) {
  DCHECK_NE(tid(), kInvalidTid);
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return &&
      tid() == GetCurrentTidOrInvalid()) {
    // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
    // called from the context of the thread it is initializing, not its parent.
    // Most platforms call AsanThread::Init on the newly-spawned thread, but
    // Fuchsia calls this function from the parent thread. To support that
    // approach, we avoid calling AsyncSignalSafeLazyInitFakeStack here; it will
    // be called by the new thread when it first attempts to access the fake
    // stack.
    AsyncSignalSafeLazyInitFakeStack();
  }
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          (void *)&local);
}
// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

void AsanThread::ThreadStart(tid_t os_id) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);

  if (common_flags()->use_sigaltstack)
    SetAlternateSignalStack();
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* parent_tid */ kMainTid,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid());
  return main_thread;
}
// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (which see, above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
                       &tls_begin_, &tls_size);
  stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
  stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif // !SANITIZER_FUCHSIA
void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
    FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
  }
}
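
// Both branches below locate the base of the instrumented frame that contains
// `addr` and then read the frame header the instrumentation lays out there:
// word [0] is kCurrentStackFrameMagic, word [1] points to the frame
// description string, and word [2] is the frame PC.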
bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr *)bottom)[2];
    access->frame_descr = (const char *)((uptr *)bottom)[1];
    return true;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= ASAN_SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char *)ptr[1];
  return true;
}
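
// Scans the shadow downwards from `addr` until it reaches a left/mid/right
// redzone marker and returns the shadow address just past it, i.e. the start
// of the shadow for the variable containing `addr`; returns 0 if `addr` is
// not on this thread's stack or fake stack.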
uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (FakeStack *fake_stack = get_fake_stack()) {
    bottom = fake_stack->AddrIsInFakeStack(addr);
    if (bottom == 0) {
      return 0;
    }
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}
bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t)
    return false;
  if (t->AddrIsInStack((uptr)addr))
    return true;
  FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return false;
  return fake_stack->AddrIsInFakeStack((uptr)addr);
}
AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, libc constructor is called _after_ asan_init, and cleans up
      // TSD. Try to figure out if this is still the main thread by the stack
      // address. We are not entirely sure that we have correct main thread
      // limits, so only do this magic on Android, and only if the found thread
      // is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}
void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}
u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}
AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}
void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == kMainTid))
    context->os_id = GetTid();
}
__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context)
    return nullptr;
  return context->thread;
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
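
// LSan acquires the thread registry before the arg/retval map and releases
// them in the reverse order, keeping the lock order consistent.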
void LockThreads() {
  __asan::asanThreadRegistry().Lock();
  __asan::asanThreadArgRetval().Lock();
}

void UnlockThreads() {
  __asan::asanThreadArgRetval().Unlock();
  __asan::asanThreadRegistry().Unlock();
}
static ThreadRegistry *GetAsanThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }
, uptr
*stack_begin
, uptr
*stack_end
,
506 uptr
*tls_begin
, uptr
*tls_end
, uptr
*cache_begin
,
507 uptr
*cache_end
, DTLS
**dtls
) {
508 __asan::AsanThread
*t
= __asan::GetAsanThreadByOsIDLocked(os_id
);
511 *stack_begin
= t
->stack_bottom();
512 *stack_end
= t
->stack_top();
513 *tls_begin
= t
->tls_begin();
514 *tls_end
= t
->tls_end();
515 // ASan doesn't keep allocator caches in TLS, so these are unused.
522 void GetAllThreadAllocatorCachesLocked(InternalMmapVector
<uptr
> *caches
) {}
void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t)
    return;
  __asan::FakeStack *fake_stack = t->get_fake_stack();
  if (!fake_stack)
    return;

  fake_stack->ForEachFakeFrame(
      [](uptr begin, uptr end, void *arg) {
        reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
            {begin, end});
      },
      ranges);
}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *arg) {
        GetThreadExtraStackRangesLocked(
            tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
      },
      ranges);
}
void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
  GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      [](ThreadContextBase *tctx, void *threads) {
        if (tctx->status == ThreadStatusRunning)
          reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
              tctx->os_id);
      },
      threads);
}

} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
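
// These entry points are meant to be driven by the user's fiber/coroutine
// library (see <sanitizer/common_interface_defs.h>): call
// __sanitizer_start_switch_fiber right before switching to another stack,
// passing the destination stack's bottom and size, and call
// __sanitizer_finish_switch_fiber right after control starts running on it.
// Passing a null fake-stack-save pointer to the start call tells ASan the old
// fiber is going away, so its fake stack is destroyed (see StartSwitchFiber
// above).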
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *fakestack, const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack, (uptr *)bottom_old,
                       (uptr *)size_old);
}
} // extern "C"