//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on memory layout of the shadow region and CompactRingBuffer that
// are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within (2**kShadowBaseAlignment)
// sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
// aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
// is the address of the next element of that ring buffer (with wrap-around).
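//
// For example, with N = 1 the buffer is 8 KiB and its base is 16 KiB-aligned,
// so bit 13 is clear in every element address: for a non-last element A + 8
// stays inside the buffer and the mask is a no-op, while for the last element
// A + 8 lands exactly at base + 8 KiB and clearing bit N + 12 = 13 wraps it
// back to the buffer base.
//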
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
// (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
//     (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored
// in said thread-local word.
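//
// A sketch of how such a packed word can be consumed (not necessarily the
// exact sequence the compiler emits), assuming A fits in the low 56 bits:
// the ring buffer size in bytes is
//     (word >> 56) << 12
// and advancing to the next element is
//     word = (word + sizeof(uptr)) & ~((word >> 56) << 12)
// which applies the wrap-around mask above without disturbing the size byte.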
//
// Note the unusual way of aligning up the address of the shadow:
//     (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
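//
// For instance, with K = 32 (an illustrative value), any A in [S - 2**32, S)
// satisfies (A | 0xffffffff) + 1 == S, where S is the 2**32-aligned shadow
// start; a conventional round-up would compute (A + 2**32 - 1) & ~(2**32 - 1)
// instead.
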
47 #include "hwasan_allocator.h"
48 #include "hwasan_flags.h"
49 #include "hwasan_thread.h"
50 #include "sanitizer_common/sanitizer_placement_new.h"
51 #include "sanitizer_common/sanitizer_thread_arg_retval.h"

static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  for (int shift = 1; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}
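
// For instance, a stack_history_size of 1024 (an illustrative value) asks for
// 8 KiB of records, so the loop above settles on shift = 1, i.e. an 8 KiB ring
// buffer; every result is a power-of-two multiple of 4 KiB, as required by the
// ABI constraints described at the top of this file.
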
struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class SANITIZER_MUTEX HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }
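
  // Example layout (illustrative, assuming an 8 KiB ring buffer and
  // sizeof(Thread) <= 8 KiB): thread_alloc_size_ is
  // RoundUpTo(8K + sizeof(Thread), 16K) = 16 KiB, so the ring buffer fills the
  // first 8 KiB of each element and the Thread object starts at
  // element_base + 8 KiB.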

  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
      SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
      }
    }
    if (t) {
      // Reusing a previously released thread; wipe its ring buffer and Thread
      // object before reinitialization.
      uptr start = (uptr)t - ring_buffer_size_;
      internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
    } else {
      t = AllocThread();
    }
    {
      SpinMutexLock l(&live_list_mutex_);
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }

  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t)
      SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list in t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }

  void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
    RemoveThreadStats(t);
    RemoveThreadFromLiveList(t);
    t->Destroy();
    DontNeedThread(t);
    SpinMutexLock l(&free_list_mutex_);
    free_list_.push_back(t);
  }

  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }
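  // The computation above is valid because each element is aligned to
  // ring_buffer_size_ * 2 and its ring buffer occupies the first
  // ring_buffer_size_ bytes, so any address inside a ring buffer rounds down
  // to the base of its own element.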

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  template <class CB>
  Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    CheckLocked();
    for (Thread *t : live_list_)
      if (cb(t))
        return t;
    return nullptr;
  }

  void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }

  void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
  void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    live_list_mutex_.CheckLocked();
  }
  void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
    live_list_mutex_.Unlock();
  }

 private:
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK_LE(free_space_, free_space_end_);
    return t;
  }

  SpinMutex free_space_mutex_;
  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
  InternalMmapVector<Thread *> free_list_
      SANITIZER_GUARDED_BY(free_list_mutex_);
  SpinMutex live_list_mutex_;
  InternalMmapVector<Thread *> live_list_
      SANITIZER_GUARDED_BY(live_list_mutex_);

  SpinMutex stats_mutex_;
  ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
ThreadArgRetval &hwasanThreadArgRetval();

}  // namespace __hwasan

#endif  // HWASAN_THREAD_LIST_H