#include "hwasan_thread.h"

#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

namespace __hwasan {

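// Seeding: prefer OS entropy from GetRandom() (non-blocking); if that is
// unavailable, fall back to an ad-hoc seed. Zero seeds are retried away
// because zero is a fixed point of the xorshift generator used below.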
static u32 RandomSeed() {
  u32 seed;
  do {
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
                            /*blocking=*/false))) {
      seed = static_cast<u32>(
          (NanoTime() >> 12) ^
          (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
    }
  } while (!seed);
  return seed;
}

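// Per-thread PRNG state: with random_tags each thread is seeded
// independently; otherwise the deterministic unique_id_ is used, keeping tag
// sequences reproducible across runs.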
void Thread::InitRandomState() {
  random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
  random_state_inited_ = true;

  // Push a random number of zeros onto the ring buffer so that the first stack
  // tag base will be random.
  for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
    stack_allocations_->push(0);
}

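// Thread objects (and their stack slots) are recycled; the CHECKs at the top
// of Init() catch a slot that was reused without being reset first.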
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
                  const InitState *state) {
  CHECK_EQ(0, unique_id_);  // try to catch bad stack reuse
  CHECK_EQ(0, stack_top_);
  CHECK_EQ(0, stack_bottom_);

  static atomic_uint64_t unique_id;
  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);

  if (auto sz = flags()->heap_history_size)
    heap_allocations_ = HeapAllocationsRingBuffer::New(sz);

#if !SANITIZER_FUCHSIA
  // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
  // be initialized before we enter the thread itself, so we will instead call
  // this later.
  InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
  InitStackAndTls(state);
}

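// Writing the ring buffer through GetCurrentThreadLongPtr() doubles as
// registering this Thread as the current thread, which is why the CHECK
// below verifies GetCurrentThread() == this.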
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
                                 uptr stack_buffer_size) {
  HwasanTSDThreadInit();  // Only needed with interceptors.
  uptr *ThreadLong = GetCurrentThreadLongPtr();
  // The following implicitly sets (this) as the current thread.
  stack_allocations_ = new (ThreadLong)
      StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
  // Check that it worked.
  CHECK_EQ(GetCurrentThread(), this);

  // ScopedTaggingDisable needs GetCurrentThread to be set up.
  ScopedTaggingDisabler disabler;

  if (stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
    CHECK(MemIsApp(stack_bottom_));
    CHECK(MemIsApp(stack_top_ - 1));
  }

  if (flags()->verbose_threads) {
    if (IsMainThread()) {
      Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
             sizeof(Thread), heap_allocations_->SizeInBytes(),
             stack_allocations_->size() * sizeof(uptr));
    }
    Print("Creating  : ");
  }
}

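// Zero the shadow for the stack and TLS ranges so that a later thread
// reusing the same memory does not inherit stale tags.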
void Thread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_)
    TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
}

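// Destruction order matters: the allocator cache is drained and shadow is
// cleared while this is still the current thread; only afterwards is the
// thread-local slot reset, after which instrumented code must not run here.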
void Thread::Destroy() {
  if (flags()->verbose_threads)
    Print("Destroying: ");
  AllocatorSwallowThreadLocalCache(allocator_cache());
  ClearShadowForThreadStackAndTLS();
  if (heap_allocations_)
    heap_allocations_->Delete();
  DTLS_Destroy();
  // Unregister this as the current thread.
  // Instrumented code cannot run on this thread from this point onwards, but
  // malloc/free can still be served. Glibc may call free() very late, after
  // all TSD destructors are done.
  CHECK_EQ(GetCurrentThread(), this);
  *GetCurrentThreadLongPtr() = 0;
}

void Thread::Print(const char *Prefix) {
  Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
         (void *)this, stack_bottom(), stack_top(),
         stack_top() - stack_bottom(), tls_begin(), tls_end());
}

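// Marsaglia's xorshift32: the (13, 17, 5) shift triple yields a full period
// of 2^32 - 1 over nonzero states, which is why seeds and states are kept
// nonzero throughout this file.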
static u32 xorshift(u32 state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

// Generate a (pseudo-)random non-zero tag.
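// Tag bits are taken num_bits at a time from a buffered 32-bit xorshift
// output, refilled when exhausted; zero tags are retried since 0 is the
// default (untagged) tag value.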
tag_t Thread::GenerateRandomTag(uptr num_bits) {
  DCHECK_GT(num_bits, 0);
  if (tagging_disabled_)
    return 0;
  tag_t tag;
  const uptr tag_mask = (1ULL << num_bits) - 1;
  do {
    if (flags()->random_tags) {
      if (!random_buffer_) {
        EnsureRandomStateInited();
        random_buffer_ = random_state_ = xorshift(random_state_);
      }
      CHECK(random_buffer_);
      tag = random_buffer_ & tag_mask;
      random_buffer_ >>= num_bits;
    } else {
      EnsureRandomStateInited();
      random_state_ += 1;
      tag = random_state_ & tag_mask;
    }
  } while (!tag);
  return tag;
}

void EnsureMainThreadIDIsCorrect() {
  auto *t = __hwasan::GetCurrentThread();
  if (t && (t->IsMainThread()))
    t->set_os_id(GetTid());
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

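// LeakSanitizer walks threads through the callbacks below when scanning for
// live pointers; HWASan backs them with its own HwasanThreadList rather than
// the common ThreadRegistry.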
static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
  auto &tl = __hwasan::hwasanThreadList();
  tl.CheckLocked();
  return &tl;
}

static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
  return GetHwasanThreadListLocked()->FindThreadLocked(
      [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}

void LockThreadRegistry() { __hwasan::hwasanThreadList().Lock(); }

void UnlockThreadRegistry() { __hwasan::hwasanThreadList().Unlock(); }

void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  auto *t = GetThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // FIXME: is this correct for HWASan?
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = nullptr;
  return true;
}

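// The remaining callbacks are no-ops: HWASan currently has no extra stack
// ranges, allocator caches, or extra context pointers to report to LSan.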
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {}
void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}

}  // namespace __lsan