//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_SHARED_H_
#define SCUDO_TSD_SHARED_H_

#include "tsd.h"

#include "string_utils.h"

#if SCUDO_HAS_PLATFORM_TLS_SLOT
// This is a platform-provided header that needs to be on the include path when
// Scudo is compiled. It must declare a function with the prototype:
//   uintptr_t *getPlatformAllocatorTlsSlot()
// that returns the address of a thread-local word of storage reserved for
// Scudo, that must be zero-initialized in newly created threads.
#include "scudo_platform_tls_slot.h"
#endif
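
// As a minimal sketch, a platform could satisfy this contract with a plain
// thread_local word (hypothetical header, for illustration only; real
// platforms typically return a slot carved out of their own TLS area):
//
//   // scudo_platform_tls_slot.h
//   inline uintptr_t *getPlatformAllocatorTlsSlot() {
//     static thread_local uintptr_t AllocatorSlot = 0; // zero-initialized
//     return &AllocatorSlot;
//   }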

namespace scudo {

template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
  void init(Allocator *Instance) REQUIRES(Mutex) {
    DCHECK(!Initialized);
    Instance->init();
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].init(Instance);
    const u32 NumberOfCPUs = getNumberOfCPUs();
    setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
                                        : Min(NumberOfCPUs, DefaultTSDCount));
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
    for (u32 I = 0; I < TSDsArraySize; I++) {
      TSDs[I].commitBack(Instance);
      TSDs[I] = {};
    }
    setCurrentTSD(nullptr);
    ScopedLock L(Mutex);
    Initialized = false;
  }

  void drainCaches(Allocator *Instance) {
    ScopedLock L(MutexTSDs);
    for (uptr I = 0; I < NumberOfTSDs; ++I) {
      TSDs[I].lock();
      Instance->drainCache(&TSDs[I]);
      TSDs[I].unlock();
    }
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
                                     UNUSED bool MinimalInit) {
    if (LIKELY(getCurrentTSD()))
      return;
    initThread(Instance);
  }

  // TSDs is an array of locks, which is not supported for marking a
  // thread-safety capability.
  ALWAYS_INLINE TSD<Allocator> *
  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
    TSD<Allocator> *TSD = getCurrentTSD();
    DCHECK(TSD);
    *UnlockRequired = true;
    // Try to lock the currently associated context.
    if (TSD->tryLock())
      return TSD;
    // If that fails, go down the slow path.
    if (TSDsArraySize == 1U) {
      // Only 1 TSD, no need to go any further.
      // The compiler will optimize this one way or the other.
      TSD->lock();
      return TSD;
    }
    return getTSDAndLockSlow(TSD);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
      TSDs[I].unlock();
    Mutex.unlock();
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::MaxTSDsCount)
      return setNumberOfTSDs(static_cast<u32>(Value));
    if (O == Option::ThreadDisableMemInit)
      setDisableMemInit(Value);
    // Not supported by the TSD Registry, but not an error either.
    return true;
  }

  bool getDisableMemInit() const { return *getTlsPtr() & 1; }

  void getStats(ScopedString *Str) EXCLUDES(MutexTSDs) {
    ScopedLock L(MutexTSDs);

    Str->append("Stats: SharedTSDs: %u available; total %u\n", NumberOfTSDs,
                TSDsArraySize);
    for (uptr I = 0; I < NumberOfTSDs; ++I) {
      TSDs[I].lock();
      // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
      // thread annotations. However, given the TSD is only locked on the
      // shared path, do the assertion in a separate path to avoid confusing
      // the analyzer.
      TSDs[I].assertLocked(/*BypassCheck=*/true);
      Str->append("  Shared TSD[%zu]:\n", I);
      TSDs[I].getCache().getStats(Str);
      TSDs[I].unlock();
    }
  }

private:

  ALWAYS_INLINE uptr *getTlsPtr() const {
#if SCUDO_HAS_PLATFORM_TLS_SLOT
    return reinterpret_cast<uptr *>(getPlatformAllocatorTlsSlot());
#else
    static thread_local uptr ThreadTSD;
    return &ThreadTSD;
#endif
  }

  static_assert(alignof(TSD<Allocator>) >= 2, "");

  ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
    *getTlsPtr() &= 1;
    *getTlsPtr() |= reinterpret_cast<uptr>(CurrentTSD);
  }

  ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
    return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
  }
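
  // The TLS word thus multiplexes two things: bit 0 holds the thread's
  // disable-mem-init flag, and the remaining bits hold the TSD pointer
  // (valid because of the alignment static_assert above). A worked example
  // with a hypothetical TSD at address 0x1000:
  //   setCurrentTSD((TSD<Allocator> *)0x1000)  -> word == 0x1000 | flag
  //   getCurrentTSD()      -> 0x1000  (word & ~1ULL)
  //   getDisableMemInit()  -> flag    (word & 1)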

  bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
    ScopedLock L(MutexTSDs);
    if (N < NumberOfTSDs)
      return false;
    if (N > TSDsArraySize)
      N = TSDsArraySize;
    NumberOfTSDs = N;
    NumberOfCoPrimes = 0;
    // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
    // array of TSDs in a random order. For details, see:
    // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
    for (u32 I = 0; I < N; I++) {
      u32 A = I + 1;
      u32 B = N;
      // Find the GCD between I + 1 and N. If 1, they are coprimes.
      while (B != 0) {
        const u32 T = A;
        A = B;
        B = T % B;
      }
      if (A == 1)
        CoPrimes[NumberOfCoPrimes++] = I + 1;
    }
    return true;
  }
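
  // An illustrative walk (hypothetical values): with N == 6 the coprimes are
  // {1, 5}. Starting at Index == 2 with Inc == 5 visits 2, 1, 0, 5, 4, 3
  // (mod 6), i.e. every context exactly once before the sequence repeats.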

  void setDisableMemInit(bool B) {
    *getTlsPtr() &= ~1ULL;
    *getTlsPtr() |= B;
  }

  NOINLINE void initThread(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
    initOnceMaybe(Instance);
    // Initial context assignment is done in a plain round-robin fashion.
    const u32 Index =
        atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
    setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
    Instance->callPostInitCallback();
  }
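
  // For example, with NumberOfTSDs == 4, successive new threads are mapped
  // to TSDs[0], TSDs[1], TSDs[2], TSDs[3], TSDs[0], ... as CurrentIndex
  // increments.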

  // TSDs is an array of locks, which is not supported for marking a
  // thread-safety capability.
  NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
      EXCLUDES(MutexTSDs) {
    // Use the Precedence of the current TSD as our random seed. Since we are
    // in the slow path, it means that tryLock failed, and as a result it's
    // very likely that said Precedence is non-zero.
    const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
    u32 N, Inc;
    {
      ScopedLock L(MutexTSDs);
      N = NumberOfTSDs;
      DCHECK_NE(NumberOfCoPrimes, 0U);
      Inc = CoPrimes[R % NumberOfCoPrimes];
    }
    if (N > 1U) {
      u32 Index = R % N;
      uptr LowestPrecedence = UINTPTR_MAX;
      TSD<Allocator> *CandidateTSD = nullptr;
      // Go randomly through at most 4 contexts and find a candidate.
      for (u32 I = 0; I < Min(4U, N); I++) {
        if (TSDs[Index].tryLock()) {
          setCurrentTSD(&TSDs[Index]);
          return &TSDs[Index];
        }
        const uptr Precedence = TSDs[Index].getPrecedence();
        // A 0 precedence here means another thread just locked this TSD.
        if (Precedence && Precedence < LowestPrecedence) {
          CandidateTSD = &TSDs[Index];
          LowestPrecedence = Precedence;
        }
        Index += Inc;
        if (Index >= N)
          Index -= N;
      }
      if (CandidateTSD) {
        CandidateTSD->lock();
        setCurrentTSD(CandidateTSD);
        return CandidateTSD;
      }
    }
    // Last resort, stick with the current one.
    CurrentTSD->lock();
    return CurrentTSD;
  }

  atomic_u32 CurrentIndex = {};
  u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
  u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
  u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
  bool Initialized GUARDED_BY(Mutex) = false;

  HybridMutex Mutex;
  HybridMutex MutexTSDs;
  TSD<Allocator> TSDs[TSDsArraySize];
};

} // namespace scudo

#endif // SCUDO_TSD_SHARED_H_