2 #include "hwasan_thread.h"
5 #include "hwasan_interface_internal.h"
6 #include "hwasan_mapping.h"
7 #include "hwasan_poisoning.h"
8 #include "hwasan_thread_list.h"
9 #include "sanitizer_common/sanitizer_atomic.h"
10 #include "sanitizer_common/sanitizer_file.h"
11 #include "sanitizer_common/sanitizer_placement_new.h"
12 #include "sanitizer_common/sanitizer_tls_get_addr.h"
static u32 RandomSeed() {
  u32 seed;
  do {
    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
                            /*blocking=*/false))) {
      seed = static_cast<u32>(
          reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4);
    }
  } while (!seed);
  return seed;
}
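
// Seed the tag PRNG. With random_tags disabled, the unique thread id is used
// instead, giving a deterministic but per-thread-distinct tag stream.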
void Thread::InitRandomState() {
  random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
  random_state_inited_ = true;

  // Push a random number of zeros onto the ring buffer so that the first stack
  // tag base will be random.
  for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
    stack_allocations_->push(0);
}
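
// One-time per-thread setup: assign a process-unique id, size the history
// ring buffers from flags, and register a per-thread allocator cache.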
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
                  const InitState *state) {
  CHECK_EQ(0, unique_id_);  // try to catch bad stack reuse
  CHECK_EQ(0, stack_top_);
  CHECK_EQ(0, stack_bottom_);

  static atomic_uint64_t unique_id;
  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);

  if (auto sz = flags()->heap_history_size)
    heap_allocations_ = HeapAllocationsRingBuffer::New(sz);

#if !SANITIZER_FUCHSIA
  // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
  // be initialized before we enter the thread itself, so we will instead call
  // this later.
  InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
  InitStackAndTls(state);
  AllocatorThreadStart(allocator_cache());

  if (flags()->verbose_threads) {
    Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
           sizeof(Thread), heap_allocations_->SizeInBytes(),
           stack_allocations_->size() * sizeof(uptr));
  }
}
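
// On Fuchsia this is invoked separately, after the thread has actually been
// entered (see the comment in Init above).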
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
                                 uptr stack_buffer_size) {
  HwasanTSDThreadInit();  // Only needed with interceptors.
  uptr *ThreadLong = GetCurrentThreadLongPtr();
  // The following implicitly sets (this) as the current thread.
  stack_allocations_ = new (ThreadLong)
      StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
  // Check that it worked.
  CHECK_EQ(GetCurrentThread(), this);

  // ScopedTaggingDisable needs GetCurrentThread to be set up.
  ScopedTaggingDisabler disabler;

  // Sanity-check the stack bookkeeping against a real stack address.
  int local;
  CHECK(AddrIsInStack((uptr)&local));
  CHECK(MemIsApp(stack_bottom_));
  CHECK(MemIsApp(stack_top_ - 1));
}
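
// Retag the thread's stack and TLS shadow back to the tags embedded in the
// range base pointers, so stale allocation tags do not outlive the thread.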
void Thread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    TagMemory(UntagAddr(stack_bottom_),
              UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
              GetTagFromPointer(stack_top_));
  if (tls_begin_ != tls_end_)
    TagMemory(UntagAddr(tls_begin_),
              UntagAddr(tls_end_) - UntagAddr(tls_begin_),
              GetTagFromPointer(tls_begin_));
}

void Thread::Destroy() {
  if (flags()->verbose_threads)
    Print("Destroying: ");
  AllocatorThreadFinish(allocator_cache());
  ClearShadowForThreadStackAndTLS();
  if (heap_allocations_)
    heap_allocations_->Delete();

  // Unregister this as the current thread.
  // Instrumented code can not run on this thread from this point onwards, but
  // malloc/free can still be served. Glibc may call free() very late, after all
  // TSD destructors are done.
  CHECK_EQ(GetCurrentThread(), this);
  *GetCurrentThreadLongPtr() = 0;
}

void Thread::Print(const char *Prefix) {
  Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
         (void *)this, stack_bottom(), stack_top(),
         stack_top() - stack_bottom(), tls_begin(), tls_end());
}
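
// One step of Marsaglia's 32-bit xorshift PRNG. Note that 0 is a fixed point,
// which is why seeds and tags are kept non-zero elsewhere in this file.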
static u32 xorshift(u32 state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

// Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
  DCHECK_GT(num_bits, 0);
  if (tagging_disabled_)
    return 0;
  tag_t tag;
  const uptr tag_mask = (1ULL << num_bits) - 1;
  do {
    if (flags()->random_tags) {
      if (!random_buffer_) {
        EnsureRandomStateInited();
        random_buffer_ = random_state_ = xorshift(random_state_);
      }
      CHECK(random_buffer_);
      tag = random_buffer_ & tag_mask;
      random_buffer_ >>= num_bits;
    } else {
      EnsureRandomStateInited();
      random_state_ += 1;
      tag = random_state_ & tag_mask;
    }
  } while (!tag);
  return tag;
}
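
// Refresh the main thread's OS thread id; it can go stale, e.g. across
// fork().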
void EnsureMainThreadIDIsCorrect() {
  auto *t = __hwasan::GetCurrentThread();
  if (t && (t->IsMainThread()))
    t->set_os_id(GetTid());
}

}  // namespace __hwasan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
  auto &tl = __hwasan::hwasanThreadList();
  tl.CheckLocked();
  return &tl;
}

static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
  return GetHwasanThreadListLocked()->FindThreadLocked(
      [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
}
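
// Lock order matters here: take the thread list lock before the arg/retval
// lock, and release in the reverse order.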
void LockThreads() {
  __hwasan::hwasanThreadList().Lock();
  __hwasan::hwasanThreadArgRetval().Lock();
}

void UnlockThreads() {
  __hwasan::hwasanThreadArgRetval().Unlock();
  __hwasan::hwasanThreadList().Unlock();
}

void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }

bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  auto *t = GetThreadByOsIDLocked(os_id);
  if (!t)
    return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // FIXME: is this correct for HWASan?
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}
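
// HWASan does not report extra stack ranges or per-thread allocator cache
// ranges to LSan, so the following callbacks are intentionally no-ops.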
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}

void GetThreadExtraStackRangesLocked(tid_t os_id,
                                     InternalMmapVector<Range> *ranges) {}
void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}

void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
  __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
}

void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}

}  // namespace __lsan