//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
#include "options.h"
#include "release.h"
#include "report.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {
// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog byte
// boundary, and keeps a bytemap of the mappable address space to track the
// size class each region is associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow transferring arrays of pointers from the global size class
// freelist to the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
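//
// A minimal sketch of a compatible Config, for illustration only (the real
// definitions live in the allocator config headers; every name below other
// than the members referenced in this file is hypothetical):
//
//   struct ExampleConfig {
//     struct Primary {
//       using CompactPtrT = uptr;
//       using SizeClassMap = DefaultSizeClassMap;
//       // Regions and groups are the same size in this primary (see the
//       // static_assert in allocateRegionSlow()).
//       static const uptr RegionSizeLog = 18U;
//       static const uptr GroupSizeLog = 18U;
//       static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
//       static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
//     };
//   };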
template <typename Config> class SizeClassAllocator32 {
public:
  typedef typename Config::Primary::CompactPtrT CompactPtrT;
  typedef typename Config::Primary::SizeClassMap SizeClassMap;
  static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
  // The bytemap can only track UINT8_MAX - 1 classes.
  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << Config::Primary::RegionSizeLog) >=
                    SizeClassMap::MaxSize,
                "");
  typedef SizeClassAllocator32<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef TransferBatch<ThisT> TransferBatchT;
  typedef BatchGroup<ThisT> BatchGroupT;

  static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
                "BatchGroupT uses the same class size as TransferBatchT");
  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? sizeof(TransferBatchT)
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }
  void unmapTestOnly() {
    {
      ScopedLock L(RegionsStashMutex);
      while (NumberOfStashedRegions > 0) {
        unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
              RegionSize);
      }
    }

    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
      *Sci = {};
    }

    ScopedLock L(ByteMapMutex);
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }
  // When all blocks are freed, it has to be the same size as `AllocatedUser`.
  void verifyAllBlocksAreReleasedTestOnly() {
    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
    uptr BatchClassUsedInFreeLists = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      // We have to count BatchClassUsedInFreeLists in other regions first.
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L1(Sci->Mutex);
      uptr TotalBlocks = 0;
      for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      }

      const uptr BlockSize = getSizeByClassId(I);
      DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
      DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
    }

    SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
    ScopedLock L1(Sci->Mutex);
    uptr TotalBlocks = 0;
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      if (LIKELY(!BG.Batches.empty())) {
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      } else {
        // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
        // recording itself.
        ++TotalBlocks;
      }
    }

    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
              Sci->AllocatedUser / BlockSize);
    const uptr BlocksInUse =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
  }
  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
    return static_cast<CompactPtrT>(Ptr);
  }

  void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
    return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
  }

  uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
    const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
    return CompactPtr & ~Mask;
  }

  uptr decompactGroupBase(uptr CompactPtrGroupBase) {
    return CompactPtrGroupBase;
  }
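  // For illustration (assuming GroupSizeLog == 18, i.e. 256 KiB groups), the
  // mask above clears the low 18 bits, so a compact pointer 0x12345678 maps
  // to the group base 0x12340000. On this 32-bit primary, compact pointers
  // are the addresses themselves, hence decompactGroupBase() is the identity.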
  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize < PageSize / 16U;
  }

  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize > PageSize;
  }
  // Note that `MaxBlockCount` will be used when we support arbitrary block
  // counts. For now it's the same as the number of blocks stored in the
  // `TransferBatch`.
  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
                UNUSED const u16 MaxBlockCount) {
    TransferBatchT *B = popBatch(C, ClassId);
    if (!B)
      return 0;

    const u16 Count = B->getCount();
    DCHECK_GT(Count, 0U);
    B->moveToArray(ToArray);

    if (ClassId != SizeClassMap::BatchClassId)
      C->deallocate(SizeClassMap::BatchClassId, B);

    return Count;
  }
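  // A rough caller-side sketch (assumed usage, not the exact CacheT code):
  // a thread-local cache refills with
  //   u16 N = popBlocks(C, ClassId, Array, MaxBlockCount);
  // where N == 0 means the refill failed (e.g. the region is exhausted), and
  // later returns the chunks in bulk via pushBlocks(C, ClassId, Array, N).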
  TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);
    TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
    if (UNLIKELY(!B)) {
      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
        return nullptr;
      B = popBatchImpl(C, ClassId, Sci);
      // If `populateFreeList` succeeded, we are supposed to get free blocks.
      DCHECK_NE(B, nullptr);
    }
    return B;
  }
  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);
    DCHECK_GT(Size, 0);

    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Sci->Mutex);
      pushBatchClassBlocks(Sci, Array, Size);
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size by a certain scale.

    // Sort the blocks so that blocks belonging to the same group can be
    // pushed together.
    bool SameGroup = true;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
        SameGroup = false;
      CompactPtrT Cur = Array[I];
      u32 J = I;
      while (J > 0 &&
             compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
        Array[J] = Array[J - 1];
        --J;
      }
      Array[J] = Cur;
    }

    ScopedLock L(Sci->Mutex);
    pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
  }
  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    ByteMapMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    ByteMapMutex.unlock();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }
  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      // TODO: Calling `iterateOverBlocks` requires disabling
      // SizeClassAllocator32. We may consider locking each region on demand
      // only.
      Sci->Mutex.assertHeld();
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }

    // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
    ByteMapMutex.assertHeld();

    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
    }
  }
  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
      PushedBlocks += Sci->FreeListInfo.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getStats(Str, I, Sci);
    }
  }
  void getFragmentationInfo(ScopedString *Str) {
    Str->append(
        "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
        getPageSizeCached());

    for (uptr I = 1; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getSizeClassFragmentationInfo(Sci, I, Str);
    }
  }
  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(Min(static_cast<s32>(Value),
                                   Config::Primary::MaxReleaseToOsIntervalMs),
                               Config::Primary::MinReleaseToOsIntervalMs);
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }
  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    // TODO: Once we have separate locks like primary64, we may consider using
    // tryLock() as well.
    ScopedLock L(Sci->Mutex);
    return releaseToOSMaybe(Sci, ClassId, ReleaseType);
  }

  uptr releaseToOS(ReleaseToOS ReleaseType) {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
    }
    return TotalReleasedBytes;
  }
  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }

  AtomicOptions Options;
private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
  static const uptr NumRegions =
      SCUDO_MMAP_RANGE_SIZE >> Config::Primary::RegionSizeLog;
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
  typedef FlatByteMap<NumRegions> ByteMap;

  struct ReleaseToOsInfo {
    uptr BytesInFreeListAtLastCheckpoint;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct BlocksInfo {
    SinglyLinkedList<BatchGroupT> BlockList = {};
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
  };

  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    BlocksInfo FreeListInfo GUARDED_BY(Mutex);
    uptr CurrentRegion GUARDED_BY(Mutex);
    uptr CurrentRegionAllocated GUARDED_BY(Mutex);
    u32 RandState;
    uptr AllocatedUser GUARDED_BY(Mutex);
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex GUARDED_BY(Mutex);
    uptr MaxRegionIndex GUARDED_BY(Mutex);
    ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> Config::Primary::RegionSizeLog;
    CHECK_LT(Id, NumRegions);
    return Id;
  }
  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUp(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);

    DCHECK_EQ(Region % RegionSize, 0U);
    static_assert(Config::Primary::RegionSizeLog == GroupSizeLog,
                  "Memory group should be the same size as Region");

    return Region;
  }
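  // For illustration of allocateRegionSlow() above (assuming RegionSizeLog ==
  // 18, i.e. RegionSize == 256 KiB): if map() returns an unaligned MapBase of
  // 0x40010000, Region is rounded up to 0x40040000, the 192 KiB of leading
  // slack is unmapped, and the 64 KiB past Region + RegionSize (up to
  // MapEnd == 0x40090000) is unmapped as well. If MapBase happens to be
  // aligned, the second half of the 2 * RegionSize mapping is stashed for a
  // future allocateRegion() call instead.
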
  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      ScopedLock L(ByteMapMutex);
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }
  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }
  void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
      REQUIRES(Sci->Mutex) {
    DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));

    // Free blocks are recorded by TransferBatch in the freelist for all
    // size classes. In addition, TransferBatch is allocated from BatchClassId.
    // In order not to use an additional block to record the free blocks in
    // BatchClassId, they are self-contained. I.e., a TransferBatch records the
    // block address of itself. See the figure below:
    //
    // TransferBatch at 0xABCD
    // +----------------------------+
    // | Free blocks' addr          |
    // | +------+------+------+     |
    // | |0xABCD|...   |...   |     |
    // | +------+------+------+     |
    // +----------------------------+
    //
    // When we allocate all the free blocks in the TransferBatch, the block
    // used by the TransferBatch is also free for use. We don't need to recycle
    // the TransferBatch. Note that the correctness is maintained by the
    // invariant:
    //
    //   The unit of each popBatch() request is an entire TransferBatch.
    //   Returning part of the blocks in a TransferBatch is invalid.
    //
    // This ensures that a TransferBatch won't leak its own address while it's
    // still holding other valid data.
    //
    // Besides, a BatchGroup is also allocated from BatchClassId and has its
    // address recorded in a TransferBatch too. To maintain the correctness:
    //
    //   The address of the BatchGroup is always recorded in the last
    //   TransferBatch in the freelist (which also implies that the freelist
    //   should only be updated with push_front). Once the last TransferBatch
    //   is popped, the block used by the BatchGroup is also free for use.
    //
    // With this approach, the blocks used by BatchGroup and TransferBatch are
    // reusable and don't need additional space for them.

    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();

    if (BG == nullptr) {
      // Construct `BatchGroup` on the last element.
      BG = reinterpret_cast<BatchGroupT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      --Size;
      BG->Batches.clear();
      // BatchClass hasn't enabled memory group. Use `0` to indicate there's no
      // memory group here.
      BG->CompactPtrGroupBase = 0;
      // `BG` is also the block of BatchClassId. Note that this is different
      // from `CreateGroup` in `pushBlocksImpl`.
      BG->PushedBlocks = 1;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));

      Sci->FreeListInfo.BlockList.push_front(BG);
    }

    if (UNLIKELY(Size == 0))
      return;

    // This happens in two cases:
    // 1. A new `BatchGroup` was just allocated.
    // 2. Only 1 block is pushed when the freelist is empty.
    if (BG->Batches.empty()) {
      // Construct the `TransferBatch` on the last element.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      TB->clear();
      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
      // recorded in the TransferBatch.
      TB->add(Array[Size - 1]);
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
      --Size;
      DCHECK_EQ(BG->PushedBlocks, 1U);
      // `TB` is also the block of BatchClassId.
      BG->PushedBlocks += 1;
      BG->Batches.push_front(TB);
    }

    TransferBatchT *CurBatch = BG->Batches.front();
    DCHECK_NE(CurBatch, nullptr);

    for (u32 I = 0; I < Size;) {
      u16 UnusedSlots =
          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
      if (UnusedSlots == 0) {
        CurBatch = reinterpret_cast<TransferBatchT *>(
            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
        CurBatch->clear();
        // Self-contained
        CurBatch->add(Array[I]);
        ++I;
        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
        // BatchClassId.
        BG->Batches.push_front(CurBatch);
        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
      }
      // `UnusedSlots` is u16 so the result will also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
      CurBatch->appendFromArray(&Array[I], AppendSize);
      I += AppendSize;
    }

    BG->PushedBlocks += Size;
  }
  // Push the blocks to their batch group. The layout will be like,
  //
  //   FreeListInfo.BlockList - > BG -> BG -> BG
  //                               |    |    |
  //                               v    v    v
  //                              TB   TB   TB
  //                                        |
  //                                        v
  //                                       TB
  //
  // Each BatchGroup (BG) is associated with a unique group id and the free
  // blocks are managed by a list of TransferBatches (TB). To reduce the time
  // of inserting blocks, BGs are sorted and the input `Array` is supposed to
  // be sorted as well, so that maintaining the sorted property is cheap.
  // Use `SameGroup=true` to indicate that all blocks in the array are from the
  // same group; then we will skip checking the group id of each block.
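  //
  // For illustration (assuming GroupSizeLog == 18, i.e. 256 KiB groups),
  // blocks at 0x40001000 and 0x40042000 fall into groups 0x40000000 and
  // 0x40040000 respectively, so they end up in two different BatchGroups.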
  //
  // The region mutex needs to be held while calling this method.
  void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
      REQUIRES(Sci->Mutex) {
    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
    DCHECK_GT(Size, 0U);

    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroupT *BG =
          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
      BG->Batches.clear();
      TransferBatchT *TB =
          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
      TB->clear();

      BG->CompactPtrGroupBase = CompactPtrGroupBase;
      BG->Batches.push_front(TB);
      BG->PushedBlocks = 0;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));

      return BG;
    };
    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
      TransferBatchT *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch =
              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }

      BG->PushedBlocks += Size;
    };
    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroupT *Prev = nullptr;

    while (Cur != nullptr &&
           compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr ||
        compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroupBase(Array[0]));
      if (Prev == nullptr)
        Sci->FreeListInfo.BlockList.push_front(Cur);
      else
        Sci->FreeListInfo.BlockList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking group
    // ids.
    if (SameGroup) {
      for (u32 I = 0; I < Size; ++I)
        DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);

      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the segment of each group
    // and push the blocks to their group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
        DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
        InsertBlocks(Cur, Array + I - Count, Count);

        while (Cur != nullptr &&
               compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr ||
            compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroupBase(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Sci->FreeListInfo.BlockList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    InsertBlocks(Cur, Array + Size - Count, Count);
  }
  // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
  // group id will be considered first.
  //
  // The region mutex needs to be held while calling this method.
  TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    if (Sci->FreeListInfo.BlockList.empty())
      return nullptr;

    SinglyLinkedList<TransferBatchT> &Batches =
        Sci->FreeListInfo.BlockList.front()->Batches;

    if (Batches.empty()) {
      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
      Sci->FreeListInfo.BlockList.pop_front();

      // The block used by `BatchGroup` is from BatchClassId. Turn the block
      // into a `TransferBatch` with a single block.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
      TB->clear();
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
      Sci->FreeListInfo.PoppedBlocks += 1;
      return TB;
    }

    TransferBatchT *B = Batches.front();
    Batches.pop_front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    if (Batches.empty()) {
      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
      Sci->FreeListInfo.BlockList.pop_front();

      // We don't keep a BatchGroup with zero blocks to avoid empty-checking
      // while allocating. Note that the block used for constructing the
      // BatchGroup is recorded as a free block in the last element of
      // BatchGroup::Batches. Which means, once we pop the last TransferBatch,
      // the block is implicitly deallocated.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, BG);
    }

    Sci->FreeListInfo.PoppedBlocks += B->getCount();
    return B;
  }
  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    uptr Region;
    uptr Offset;
    // If the size class currently has a region associated to it, use it. The
    // newly created blocks will be located after the currently allocated
    // memory for that region (up to RegionSize). Otherwise, create a new
    // region, where the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return false;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = CacheT::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lowest of the two). This
    // will not be 0 as we ensure that a region can at least hold one block
    // (via static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatchT::MaxNumCached;
    // Fill the transfer batches and put them in the size-class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);

    if (ClassId != SizeClassMap::BatchClassId) {
      u32 N = 1;
      uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
      for (u32 I = 1; I < NumberOfBlocks; I++) {
        if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
          shuffle(ShuffleArray + I - N, N, &Sci->RandState);
          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          N = 1;
          CurGroup = compactPtrGroupBase(ShuffleArray[I]);
        } else {
          ++N;
        }
      }

      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
    } else {
      pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
    }

    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `PushBlocks` and `PopBatch`, which are the external
    // interfaces. `populateFreeList` is an internal interface so we should set
    // the value back to avoid incorrectly updating the stats.
    Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room in the currently associated region to fit
    // more blocks, we deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return true;
  }
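  // For illustration of populateFreeList() above (hypothetical numbers): with
  // Size = 1024, MaxCount = 13, MaxNumBatches = 8 and 300 KiB left in the
  // current region, NumberOfBlocks = Min(8 * 13, 307200 / 1024) =
  // Min(104, 300) = 104 blocks are carved, shuffled per group and pushed.
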
  void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    if (Sci->AllocatedUser == 0)
      return;
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr InUse =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
    uptr PushedBytesDelta = 0;
    if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      PushedBytesDelta =
          BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    }
    const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
    Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
                "latest pushed bytes: %6zuK\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
                InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
                Sci->ReleaseInfo.LastReleasedBytes >> 10,
                PushedBytesDelta >> 10);
  }
  void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
                                     ScopedString *Str) REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };

    FragmentationRecorder Recorder;
    if (!Sci->FreeListInfo.BlockList.empty()) {
      PageReleaseContext Context =
          markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
                         ReleaseToOS::ForceAll);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    }

    const uptr PageSize = getPageSizeCached();
    const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
    const uptr InUseBlocks =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    uptr AllocatedPagesCount = 0;
    if (TotalBlocks != 0U) {
      for (uptr I = 0; I < NumberOfRegions; ++I) {
        if (SkipRegion(I))
          continue;
        AllocatedPagesCount += RegionSize / PageSize;
      }

      DCHECK_NE(AllocatedPagesCount, 0U);
    }

    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
    const uptr InUsePages =
        AllocatedPagesCount - Recorder.getReleasedPagesCount();
    const uptr InUseBytes = InUsePages * PageSize;

    uptr Integral;
    uptr Fractional;
    computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
                      &Fractional);
    Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
                AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
  }
  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
      REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);

    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
            BlockSize;

    if (UNLIKELY(BytesInFreeList == 0))
      return 0;

    // ====================================================================== //
    // 1. Check if we have enough free blocks and if it's worth doing a page
    //    release.
    // ====================================================================== //
    if (ReleaseType != ReleaseToOS::ForceAll &&
        !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
                                 ReleaseType)) {
      return 0;
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_NE(Last, 0U);
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;

    // ==================================================================== //
    // 2. Mark the free blocks so we can tell which pages are in-use by
    //    querying `PageReleaseContext`.
    // ==================================================================== //
    PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
                                                NumberOfRegions, ReleaseType);
    if (!Context.hasBlockMarked())
      return 0;

    // ==================================================================== //
    // 3. Release the unused physical pages back to the OS.
    // ==================================================================== //
    ReleaseRecorder Recorder(Base);
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();

    return TotalReleasedBytes;
  }
  bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
      REQUIRES(Sci->Mutex) {
    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
    const uptr PageSize = getPageSizeCached();

    if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;

    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages. For example, the
    // following is the region usage,
    //
    //  BytesInFreeListAtLastCheckpoint      AllocatedUser
    //                v                           v
    //  |--------------------------------------->
    //         ^                         ^
    //  BytesInFreeList          ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do a page release. If
    // we don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, we may take a longer time to wait for
    // enough freed blocks because we miss the bytes between
    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
    const uptr PushedBytesDelta =
        BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    if (PushedBytesDelta < PageSize)
      return false;

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
      if (PushedBytesDelta < Sci->AllocatedUser / 16U)
        return false;

    if (ReleaseType == ReleaseToOS::Normal) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return false;

      // The constant 8 here is selected from profiling some apps, where the
      // number of unreleased pages in the large size classes is around 16
      // pages or more. Choose half of it as a heuristic, which also avoids a
      // page release for every pushBlocks() attempt by large blocks.
      const bool ByPassReleaseInterval =
          isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
      if (!ByPassReleaseInterval) {
        if (Sci->ReleaseInfo.LastReleaseAtNs +
                static_cast<u64>(IntervalMs) * 1000000 >
            getMonotonicTimeFast()) {
          // Memory was returned recently.
          return false;
        }
      }
    } // if (ReleaseType == ReleaseToOS::Normal)

    return true;
  }
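  // For illustration of hasChanceToReleasePages() above (assuming a 4 KiB
  // page): with BlockSize = 64 (a "small" block, since 64 < 4096 / 16) and
  // AllocatedUser = 1 MiB, a Normal release is only attempted once
  // PushedBytesDelta reaches AllocatedUser / 16 = 64 KiB, and then only if
  // the release interval check passes.
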
  PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
                                    const uptr BlockSize, const uptr Base,
                                    const uptr NumberOfRegions,
                                    ReleaseToOS ReleaseType)
      REQUIRES(Sci->Mutex) {
    const uptr PageSize = getPageSizeCached();
    const uptr GroupSize = (1UL << GroupSizeLog);
    const uptr CurGroupBase =
        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));

    PageReleaseContext Context(BlockSize, NumberOfRegions,
                               /*ReleaseSize=*/RegionSize);

    auto DecompactPtr = [](CompactPtrT CompactPtr) {
      return reinterpret_cast<uptr>(CompactPtr);
    };
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
      // The `GroupSize` may not be evenly divisible by `BlockSize`, which
      // means there is an unused space at the end of the Region. Exclude that
      // space to avoid an unused page map entry.
      uptr AllocatedGroupSize = GroupBase == CurGroupBase
                                    ? Sci->CurrentRegionAllocated
                                    : roundDownSlow(GroupSize, BlockSize);
      if (AllocatedGroupSize == 0)
        continue;

      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all caches used.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;

      if (ReleaseType != ReleaseToOS::ForceAll) {
        if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
          BG.BytesInBGAtLastCheckpoint = BytesInBG;
          continue;
        }

        const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
        if (PushedBytesDelta < PageSize)
          continue;

        // Given the randomness property, we try to release the pages only if
        // the bytes used by free blocks exceed a certain proportion of the
        // allocated space.
        if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
                                           (100U - 1U - BlockSize / 16U)) {
          continue;
        }
      }

      // TODO: Consider updating this after page release if `ReleaseRecorder`
      // can tell the released bytes in each group.
      BG.BytesInBGAtLastCheckpoint = BytesInBG;

      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      const uptr RegionIndex = (GroupBase - Base) / RegionSize;

      if (NumBlocks == MaxContainedBlocks) {
        for (const auto &It : BG.Batches)
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);

        const uptr To = GroupBase + AllocatedGroupSize;
        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
                                      AllocatedGroupSize);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);

        // Note that we don't always visit blocks in each BatchGroup, so we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
                                       RegionIndex, AllocatedGroupSize,
                                       /*MayContainLastBlockInRegion=*/true);
      }

      // We may not be able to do the page release in the rare case that the
      // PageMap allocation fails.
      if (UNLIKELY(!Context.hasBlockMarked()))
        break;
    }

    return Context;
  }
  SizeClassInfo SizeClassInfoArray[NumClasses] = {};

  HybridMutex ByteMapMutex;
  // Track the regions in use: 0 means unused, otherwise store ClassId + 1.
  ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
  atomic_s32 ReleaseToOsIntervalMs = {};
  // Unless several threads request regions simultaneously from different size
  // classes, the stash rarely contains more than 1 entry.
  static constexpr uptr MaxStashedRegions = 4;
  HybridMutex RegionsStashMutex;
  uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
  uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_