//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on memory layout of the shadow region and CompactRingBuffer that
// are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within (2**kShadowBaseAlignment)
//   sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 7), and is
//   aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~(2**N * 4096)
//   is the address of the next element of that ring buffer (with wrap-around).
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
//   (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
//     (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored
// in said thread-local word.
//
// Note the unusual way of aligning up the address of the shadow:
//   (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
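//
// As a worked example (addresses are illustrative only): with N = 1 the ring
// buffer is 8192 bytes, aligned to 16384. For a buffer placed at 0x4000 the
// last element is at A = 0x5ff8, and
//     A_next = (0x5ff8 + 8) & ~0x2000 = 0x4000
// wraps back to the start of the buffer, while for any interior element the
// same expression just advances A by sizeof(uptr). The packed thread-local
// word for that element is (1 << 57) | 0x5ff8 = 0x0200000000005ff8; its top
// byte (0x02) is the buffer size in pages.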
47 #include "hwasan_allocator.h"
48 #include "hwasan_flags.h"
49 #include "hwasan_thread.h"
50 #include "sanitizer_common/sanitizer_thread_arg_retval.h"
static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  // Note that we *cannot* do that on Android, as the runtime will indefinitely
  // have to support code that is compiled with ashr, which only works with
  // shifts up to 6.
  for (int shift = 0; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}

struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class SANITIZER_MUTEX HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }
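
  // Takes a recycled Thread slot off the free list when available (zeroing it
  // first), otherwise carves a new slot out of the storage region, then adds
  // the thread to the live list and initializes it for the current thread.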
  Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
      SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
    Thread *t = nullptr;
    {
      SpinMutexLock l(&free_list_mutex_);
      if (!free_list_.empty()) {
        t = free_list_.back();
        free_list_.pop_back();
      }
    }
    if (t) {
      uptr start = (uptr)t - ring_buffer_size_;
      internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
    } else {
      t = AllocThread();
    }
    {
      SpinMutexLock l(&live_list_mutex_);
      live_list_.push_back(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
    AddThreadStats(t);
    return t;
  }

  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void RemoveThreadFromLiveList(Thread *t)
      SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *&t2 : live_list_)
      if (t2 == t) {
        // To remove t2, copy the last element of the list in t2's position,
        // and pop_back(). This works even if t2 is itself the last element.
        t2 = live_list_.back();
        live_list_.pop_back();
        return;
      }
    CHECK(0 && "thread not found in live list");
  }

  void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
    RemoveThreadStats(t);
    RemoveThreadFromLiveList(t);
    t->Destroy();
    DontNeedThread(t);
    SpinMutexLock l(&free_list_mutex_);
    free_list_.push_back(t);
  }
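
  // Maps an address inside a stack ring buffer back to its owning Thread.
  // Relies on each (ring buffer, Thread) slot being aligned to
  // ring_buffer_size_ * 2 with the Thread stored right after its ring buffer.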
  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
    SpinMutexLock l(&live_list_mutex_);
    for (Thread *t : live_list_) cb(t);
  }

  template <class CB>
  Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    CheckLocked();
    for (Thread *t : live_list_)
      if (cb(t))
        return t;
    return nullptr;
  }

  void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

  uptr GetRingBufferSize() const { return ring_buffer_size_; }

  void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
  void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
    live_list_mutex_.CheckLocked();
  }
  void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
    live_list_mutex_.Unlock();
  }

 private:
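  // Carves a new (ring buffer, Thread) slot out of the storage region given to
  // the constructor; CreateCurrentThread prefers recycling slots from the free
  // list before calling this.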
  Thread *AllocThread() {
    SpinMutexLock l(&free_space_mutex_);
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK_LE(free_space_, free_space_end_);
    return t;
  }

  SpinMutex free_space_mutex_;
  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  SpinMutex free_list_mutex_;
  InternalMmapVector<Thread *> free_list_
      SANITIZER_GUARDED_BY(free_list_mutex_);
  SpinMutex live_list_mutex_;
  InternalMmapVector<Thread *> live_list_
      SANITIZER_GUARDED_BY(live_list_mutex_);

  SpinMutex stats_mutex_;
  ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
ThreadArgRetval &hwasanThreadArgRetval();

}  // namespace __hwasan

#endif