//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;
  u32 link;
  StackStore::Id store_id;

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;
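  // Nodes are considered equal when their 64-bit hashes match; the full
  // trace is not re-compared, so lookups rely on the hash being
  // collision-resistant enough in practice.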
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }
  static uptr allocated();
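  // Hashes the raw PC array (plus the trace tag) with MurMur2, seeded with
  // the byte size of the trace so traces of different lengths diverge early.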
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};

static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;

int StackDepotHandle::use_count() const {
  return atomic_load_relaxed(&useCounts[id_]);
}

void StackDepotHandle::inc_use_count_unsafe() {
  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
}

uptr StackDepotNode::allocated() {
  return stackStore.Allocated() + useCounts.MemoryUsage();
}

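// Packs the stack store using the compression mode selected by the
// compress_stack_depot flag.  With verbosity >= 1 it also reports how much
// memory the packing released and how long it took.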
static void CompressStackStore() {
  u64 start = Verbosity() >= 1 ? MonotonicNanoTime() : 0;
  uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
      Abs(common_flags()->compress_stack_depot)));
  if (!diff)
    return;
  if (Verbosity() >= 1) {
    u64 finish = MonotonicNanoTime();
    uptr total_before = theDepot.GetStats().allocated + diff;
    VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
            SanitizerToolName, diff >> 10, total_before >> 10,
            (finish - start) / 1000000);
  }
}

namespace {

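// Background thread that runs CompressStackStore() off the allocation hot
// path.  It is started lazily on the first notification and woken through a
// semaphore; run_ acts as the shutdown signal for WaitForWork().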
class CompressThread {
 public:
  constexpr CompressThread() = default;
  void NewWorkNotify();
  void Stop();
  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

 private:
  enum class State {
    NotStarted = 0,
    Started,
    Failed,
    Stopped,
  };

  void Run();

  bool WaitForWork() {
    semaphore_.Wait();
    return atomic_load(&run_, memory_order_acquire);
  }

  Semaphore semaphore_ = {};
  StaticSpinMutex mutex_ = {};
  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
  atomic_uint8_t run_ = {};
};
static CompressThread compress_thread;

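// Interprets the compress_stack_depot flag: 0 disables compression entirely,
// a positive value offloads packing to the background thread, and a negative
// value (for testing or debugging) compresses synchronously on the caller.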
void CompressThread::NewWorkNotify() {
  int compress = common_flags()->compress_stack_depot;
  if (!compress)
    return;
  if (compress > 0 /* for testing or debugging */) {
    SpinMutexLock l(&mutex_);
    if (state_ == State::NotStarted) {
      atomic_store(&run_, 1, memory_order_release);
      CHECK_EQ(nullptr, thread_);
      thread_ = internal_start_thread(
          [](void *arg) -> void * {
            reinterpret_cast<CompressThread *>(arg)->Run();
            return nullptr;
          },
          this);
      state_ = thread_ ? State::Started : State::Failed;
    }
    if (state_ == State::Started) {
      semaphore_.Post();
      return;
    }
  }
  CompressStackStore();
}

void CompressThread::Run() {
  VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
  while (WaitForWork()) CompressStackStore();
  VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
}

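// Shuts the thread down: run_ is cleared and the semaphore posted so
// WaitForWork() returns false, then the thread is joined outside mutex_ to
// avoid holding a spin lock across the join.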
void CompressThread::Stop() {
  void *t = nullptr;
  {
    SpinMutexLock l(&mutex_);
    if (state_ != State::Started)
      return;
    state_ = State::Stopped;
    CHECK_NE(nullptr, thread_);
    t = thread_;
    thread_ = nullptr;
  }
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(t);
}

void CompressThread::LockAndStop() {
  mutex_.Lock();
  if (state_ != State::Started)
    return;
  CHECK_NE(nullptr, thread_);

  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(thread_);
  // Allow restarting after Unlock() if needed.
  state_ = State::NotStarted;
  thread_ = nullptr;
}

void CompressThread::Unlock() { mutex_.Unlock(); }

}  // namespace

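// Called by the depot base when a new node is inserted.  A non-zero pack
// hint from StackStore::Store triggers a compression notification.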
void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
  stack_hash = hash;
  uptr pack = 0;
  store_id = stackStore.Store(args, &pack);
  if (LIKELY(!pack))
    return;
  compress_thread.NewWorkNotify();
}

StackDepotNode::args_type StackDepotNode::load(u32 id) const {
  if (!store_id)
    return {};
  return stackStore.Load(store_id);
}

StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }

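// Typical round trip through the depot (a sketch; `pcs` and `n` are
// hypothetical caller-provided values):
//   StackTrace st(pcs, n);
//   u32 id = StackDepotPut(st);           // dedups, returns a stable id
//   StackTrace back = StackDepotGet(id);  // same frames, owned by the depot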
u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return StackDepotNode::get_handle(theDepot.Put(stack));
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

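// Fork support: the depot, the compression thread, and the store are locked
// in a fixed order before fork and released in the reverse order afterwards,
// so the child process never inherits a half-updated depot or a running
// compression thread.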
void StackDepotLockBeforeFork() {
  theDepot.LockBeforeFork();
  compress_thread.LockAndStop();
  stackStore.LockAll();
}

void StackDepotUnlockAfterFork(bool fork_child) {
  stackStore.UnlockAll();
  compress_thread.Unlock();
  theDepot.UnlockAfterFork(fork_child);
}

void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}

void StackDepotStopBackgroundThread() { compress_thread.Stop(); }

StackDepotHandle StackDepotNode::get_handle(u32 id) {
  return StackDepotHandle(&theDepot.nodes[id], id);
}

void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}

}  // namespace __sanitizer