//===-- quarantine.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_QUARANTINE_H_
#define SCUDO_QUARANTINE_H_

#include "list.h"
#include "mutex.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {
struct QuarantineBatch {
  // With the following count, a batch (and the header that protects it) occupy
  // 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
  static const u32 MaxCount = 1019;
  QuarantineBatch *Next;
  uptr Size;
  u32 Count;
  void *Batch[MaxCount];

  void init(void *Ptr, uptr Size) {
    Count = 1;
    Batch[0] = Ptr;
    this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }

  void push_back(void *Ptr, uptr Size) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = Ptr;
    this->Size += Size;
  }
  bool canMerge(const QuarantineBatch *const From) const {
    return Count + From->Count <= MaxCount;
  }
  void merge(QuarantineBatch *const From) {
    DCHECK_LE(Count + From->Count, MaxCount);
    DCHECK_GE(Size, sizeof(QuarantineBatch));

    for (uptr I = 0; I < From->Count; ++I)
      Batch[Count + I] = From->Batch[I];
    Count += From->Count;
    Size += From->getQuarantinedSize();

    From->Count = 0;
    From->Size = sizeof(QuarantineBatch);
  }

  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
};

static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
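
// Sanity-checking the constant above (a sketch, assuming natural alignment):
// on 64-bit, the header fields take Next (8) + Size (8) + Count (4, padded to
// 8) = 24 bytes, and the pointer array 1019 * 8 = 8152 bytes, so
// sizeof(QuarantineBatch) == 8176, leaving 16 of the 8192-byte budget for the
// chunk header. On 32-bit, 12 bytes of fields plus 1019 * 4 = 4076 bytes of
// pointers give 4088 of the 4096-byte budget.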

// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
  void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }

  // Total memory used, including internal accounting.
  uptr getSize() const { return atomic_load_relaxed(&Size); }
  // Memory used for internal accounting.
  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
  void enqueue(Callback Cb, void *Ptr, uptr Size) {
    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
      QuarantineBatch *B =
          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
      DCHECK(B);
      B->init(Ptr, Size);
      enqueueBatch(B);
    } else {
      List.back()->push_back(Ptr, Size);
      addToSize(Size);
    }
  }
  void transfer(QuarantineCache *From) {
    List.append_back(&From->List);
    addToSize(From->getSize());
    atomic_store_relaxed(&From->Size, 0);
  }
  void enqueueBatch(QuarantineBatch *B) {
    List.push_back(B);
    addToSize(B->Size);
  }

  QuarantineBatch *dequeueBatch() {
    if (List.empty())
      return nullptr;
    QuarantineBatch *B = List.front();
    List.pop_front();
    subFromSize(B->Size);
    return B;
  }
  void mergeBatches(QuarantineCache *ToDeallocate) {
    uptr ExtractedSize = 0;
    QuarantineBatch *Current = List.front();
    while (Current && Current->Next) {
      if (Current->canMerge(Current->Next)) {
        QuarantineBatch *Extracted = Current->Next;
        // Move all the chunks into the current batch.
        Current->merge(Extracted);
        DCHECK_EQ(Extracted->Count, 0);
        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        List.extract(Current, Extracted);
        ExtractedSize += Extracted->Size;
        // Add it to the deallocation list.
        ToDeallocate->enqueueBatch(Extracted);
      } else {
        Current = Current->Next;
      }
    }
    subFromSize(ExtractedSize);
  }
  void getStats(ScopedString *Str) const {
    uptr BatchCount = 0;
    uptr TotalOverheadBytes = 0;
    uptr TotalBytes = 0;
    uptr TotalQuarantineChunks = 0;
    for (const QuarantineBatch &Batch : List) {
      BatchCount++;
      TotalBytes += Batch.Size;
      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
      TotalQuarantineChunks += Batch.Count;
    }
    const uptr QuarantineChunksCapacity =
        BatchCount * QuarantineBatch::MaxCount;
    const uptr ChunksUsagePercent =
        (QuarantineChunksCapacity == 0)
            ? 0
            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
    const uptr MemoryOverheadPercent =
        (TotalQuarantinedBytes == 0)
            ? 0
            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
    Str->append(
        "Stats: Quarantine: batches: %zu; bytes: %zu (user: %zu); chunks: %zu "
        "(capacity: %zu); %zu%% chunks used; %zu%% memory overhead\n",
        BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
        QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
  }
private:
  SinglyLinkedList<QuarantineBatch> List;
  atomic_uptr Size = {};
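
  // Note: the two helpers below perform a relaxed load followed by a relaxed
  // store rather than an atomic read-modify-write. This appears to rely on
  // writers being externally serialized (a per-thread cache is only mutated
  // by its owner, and the global cache is guarded by a mutex); the atomic
  // only makes concurrent getSize() reads well-defined.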
  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
};

// The callback interface is:
// void Callback::recycle(Node *Ptr);
// void *Callback::allocate(uptr Size);
// void Callback::deallocate(void *Ptr);
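//
// For illustration only, a minimal Callback sketch (hypothetical, not part of
// this header; a real Callback forwards into the owning allocator's backend):
//
//   struct ExampleCallback {
//     void recycle(Node *Ptr) { /* finally release the user chunk */ }
//     void *allocate(uptr Size) { /* carve memory for a QuarantineBatch */ }
//     void deallocate(void *Ptr) { /* release a drained QuarantineBatch */ }
//   };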
template <typename Callback, typename Node> class GlobalQuarantine {
public:
  typedef QuarantineCache<Callback> CacheT;
  using ThisT = GlobalQuarantine<Callback, Node>;

  void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
    DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this allows us to perform just one atomic read
    // per put() call).
    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);

    atomic_store_relaxed(&MaxSize, Size);
    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
    atomic_store_relaxed(&MaxCacheSize, CacheSize);

    Cache.init();
  }

  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }

  // This is supposed to be used in tests only.
  bool isEmpty() {
    ScopedLock L(CacheMutex);
    return Cache.getSize() == 0U;
  }
  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
    C->enqueue(Cb, Ptr, Size);
    if (C->getSize() > getCacheSize())
      drain(C, Cb);
  }
  void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    bool needRecycle = false;
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
      needRecycle = Cache.getSize() > getMaxSize();
    }

    if (needRecycle && RecycleMutex.tryLock())
      recycle(atomic_load_relaxed(&MinSize), Cb);
  }
  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
    {
      ScopedLock L(CacheMutex);
      Cache.transfer(C);
    }
    RecycleMutex.lock();
    recycle(0, Cb);
  }
  void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
    ScopedLock L(CacheMutex);
    // This assumes that the world is stopped, just like the allocator's
    // printStats.
    Cache.getStats(Str);
    Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
                getMaxSize() >> 10, getCacheSize() >> 10);
  }
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // RecycleMutex must be locked 1st since we grab CacheMutex within recycle.
    RecycleMutex.lock();
    CacheMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    CacheMutex.unlock();
    RecycleMutex.unlock();
  }
private:
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
  CacheT Cache GUARDED_BY(CacheMutex);
  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
  atomic_uptr MinSize = {};
  atomic_uptr MaxSize = {};
  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
  void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
      EXCLUDES(CacheMutex) {
    CacheT Tmp;
    Tmp.init();
    {
      ScopedLock L(CacheMutex);
      // Go over the batches and merge partially filled ones to save some
      // memory, otherwise the batches themselves (since the memory used by
      // them is counted against the quarantine limit) can outweigh the actual
      // user's quarantined chunks, which diminishes the purpose of the
      // quarantine.
      const uptr CacheSize = Cache.getSize();
      const uptr OverheadSize = Cache.getOverheadSize();
      DCHECK_GE(CacheSize, OverheadSize);
      // Do the merge only when overhead exceeds this predefined limit (might
      // require some tuning). It saves us a merge attempt when the batch list
      // quarantine is unlikely to contain batches suitable for merge. With the
      // value of 100 below, the condition simplifies to
      // OverheadSize > CacheSize / 2, i.e. merge once the accounting overhead
      // exceeds half of the cache size.
      constexpr uptr OverheadThresholdPercents = 100;
      if (CacheSize > OverheadSize &&
          OverheadSize * (100 + OverheadThresholdPercents) >
              CacheSize * OverheadThresholdPercents) {
        Cache.mergeBatches(&Tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (Cache.getSize() > MinSize)
        Tmp.enqueueBatch(Cache.dequeueBatch());
    }
    RecycleMutex.unlock();
    doRecycle(&Tmp, Cb);
  }
  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
    while (QuarantineBatch *B = C->dequeueBatch()) {
      // Shuffle the batch, seeding from the batch and cache addresses.
      const u32 Seed = static_cast<u32>(
          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
      B->shuffle(Seed);
      constexpr uptr NumberOfPrefetch = 8UL;
      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
      for (uptr I = 0; I < NumberOfPrefetch; I++)
        PREFETCH(B->Batch[I]);
      for (uptr I = 0, Count = B->Count; I < Count; I++) {
        if (I + NumberOfPrefetch < Count)
          PREFETCH(B->Batch[I + NumberOfPrefetch]);
        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
      }
      Cb.deallocate(B);
    }
  }
};

} // namespace scudo
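
// A minimal usage sketch (hypothetical ExampleCallback/ExampleNode types, for
// orientation only): the per-thread cache absorbs put() calls, overflows into
// the global quarantine once it outgrows CacheSize, and recycling kicks in
// past the global limit.
//
//   scudo::GlobalQuarantine<ExampleCallback, ExampleNode> Quarantine;
//   scudo::QuarantineCache<ExampleCallback> ThreadCache;
//   Quarantine.init(/*Size=*/1 << 20, /*CacheSize=*/1 << 16);
//   ThreadCache.init();
//   // On deallocation, stash the chunk instead of releasing it immediately.
//   Quarantine.put(&ThreadCache, ExampleCallback(), Ptr, ChunkSize);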

#endif // SCUDO_QUARANTINE_H_