//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

#include "string_utils.h"

namespace scudo {

struct ThreadState {
  bool DisableMemInit : 1;
  enum : unsigned {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  using ThisT = TSDRegistryExT<Allocator>;
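
  // A scoped accessor: the constructor picks up either the current thread's
  // exclusive TSD or the locked fallback TSD, and the destructor releases the
  // fallback lock when one was taken.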
  struct ScopedTSD {
    ALWAYS_INLINE ScopedTSD(ThisT &TSDRegistry) {
      CurrentTSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
      DCHECK_NE(CurrentTSD, nullptr);
    }

    ~ScopedTSD() {
      if (UNLIKELY(UnlockRequired))
        CurrentTSD->unlock();
    }

    TSD<Allocator> &operator*() { return *CurrentTSD; }

    TSD<Allocator> *operator->() {
      CurrentTSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
      return CurrentTSD;
    }

  private:
    TSD<Allocator> *CurrentTSD;
    bool UnlockRequired;
  };
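
  // A minimal usage sketch (illustrative only; `Registry` and the cache
  // accessor are assumptions based on the companion tsd.h, not defined here):
  //   typename TSDRegistryExT<Allocator>::ScopedTSD TSD(Registry);
  //   auto &Cache = TSD->getCache();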

  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.init(Instance);
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    DCHECK(Instance);
    if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
      DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
                Instance);
      ThreadTSD.commitBack(Instance);
      State = {};
    }
    CHECK_EQ(pthread_key_delete(PThreadKey), 0);
    PThreadKey = {};
    FallbackTSD.commitBack(Instance);
    FallbackTSD = {};
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    // We don't have a way to iterate over all thread local `ThreadTSD`s, so we
    // simply drain the current thread's `ThreadTSD` and the `FallbackTSD`.
    Instance->drainCache(&ThreadTSD);
    FallbackTSD.lock();
    Instance->drainCache(&FallbackTSD);
    FallbackTSD.unlock();
  }
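
  // Per-(de)allocation fast path: fully initialized threads bail out on the
  // LIKELY check; only the first use from a thread takes the NOINLINE slow
  // path in initThread() below.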
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local one.
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }
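
  // disable() and enable() must be called in matched pairs; a typical use (an
  // assumption based on how allocators consume such hooks, not shown in this
  // header) is bracketing a fork() so no thread holds these locks across it:
  //   Registry.disable();
  //   fork();
  //   Registry.enable();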

  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      // Not supported by the exclusive TSD registry, but not an error either.
      return true;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

  void getStats(ScopedString *Str) {
    // We don't have a way to iterate over all thread local `ThreadTSD`s.
    // Instead of printing only our own `ThreadTSD`, which could mislead the
    // reader, we simply skip it.
    Str->append("Exclusive TSD doesn't support iterating each TSD\n");
  }

private:
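  // Fast path: an initialized, enabled thread uses its exclusive `ThreadTSD`
  // with no locking; otherwise the shared `FallbackTSD` is locked and handed
  // out. The acquire load of `Disabled` pairs with the release stores in
  // disable() and enable().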
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // Using minimal initialization allows for global initialization while
  // keeping the thread specific structure untouched; the fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.init(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey = {};
  bool Initialized GUARDED_BY(Mutex) = false;
  atomic_u8 Disabled = {};
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator>
void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls user
  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
  // We want to be called last since other destructors might call free and the
  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
  // quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}
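
// A minimal sketch of how an allocator config might select this registry
// (`MyConfig` is hypothetical; the `TSDRegistryT` alias follows the pattern
// used by the Scudo allocator configs):
//   struct MyConfig {
//     template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
//     // ... remaining config members ...
//   };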

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_