//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "allocator_common.h"
#include "local_cache.h"
#include "string_utils.h"
#include "thread_annotations.h"

namespace scudo {

// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the
// size class the regions are associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent
// any predictable address pattern (the predictability increases with the
// block size).
//
// Regions for size class 0 are special and used to hold TransferBatches,
// which allow transferring arrays of pointers from the global size class
// freelist to the thread-specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
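
// For reference, the `Config` template parameter is expected to provide at
// least the members used in this file. The sketch below is illustrative only
// (derived from the usages further down), not a definitive interface:
//
//   struct ExampleConfig {
//     typedef uptr CompactPtrT;                  // compact pointer type
//     typedef DefaultSizeClassMap SizeClassMap;  // size class table
//     static uptr getGroupSizeLog();             // log2 of the group size
//     static uptr getRegionSizeLog();            // log2 of the region size
//     static s32 getDefaultReleaseToOsIntervalMs();
//     static s32 getMinReleaseToOsIntervalMs();
//     static s32 getMaxReleaseToOsIntervalMs();
//   };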

template <typename Config> class SizeClassAllocator32 {
public:
  typedef typename Config::CompactPtrT CompactPtrT;
  typedef typename Config::SizeClassMap SizeClassMap;
  static const uptr GroupSizeLog = Config::getGroupSizeLog();
  // The bytemap can only track UINT8_MAX - 1 classes.
  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
  // Regions should be large enough to hold the largest Block.
  static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
                "");
  typedef SizeClassAllocator32<Config> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
  typedef TransferBatch<ThisT> TransferBatchT;
  typedef BatchGroup<ThisT> BatchGroupT;

  static uptr getSizeByClassId(uptr ClassId) {
    return (ClassId == SizeClassMap::BatchClassId)
               ? Max(sizeof(BatchGroupT), sizeof(TransferBatchT))
               : SizeClassMap::getSizeByClassId(ClassId);
  }

  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    if (SCUDO_FUCHSIA)
      reportError("SizeClassAllocator32 is not supported on Fuchsia");

    if (SCUDO_TRUSTY)
      reportError("SizeClassAllocator32 is not supported on Trusty");

    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    PossibleRegions.init();
    u32 Seed;
    const u64 Time = getMonotonicTimeFast();
    if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
      Seed = static_cast<u32>(
          Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      Sci->RandState = getRandomU32(&Seed);
      // Sci->MaxRegionIndex is already initialized to 0.
      Sci->MinRegionIndex = NumRegions;
      Sci->ReleaseInfo.LastReleaseAtNs = Time;
    }

    // A default value defined in the primary config takes priority over the
    // passed-in interval.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
  }

  void unmapTestOnly() {
    {
      ScopedLock L(RegionsStashMutex);
      while (NumberOfStashedRegions > 0) {
        unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
              RegionSize);
      }
    }

    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }

    ScopedLock L(ByteMapMutex);
    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
      if (PossibleRegions[I])
        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
    PossibleRegions.unmapTestOnly();
  }

  // When all blocks are freed, it has to be the same size as `AllocatedUser`.
  void verifyAllBlocksAreReleasedTestOnly() {
    // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
    uptr BatchClassUsedInFreeLists = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      // We have to count BatchClassUsedInFreeLists in other regions first.
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L1(Sci->Mutex);
      uptr TotalBlocks = 0;
      for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
        // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
        BatchClassUsedInFreeLists += BG.Batches.size() + 1;
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      }

      const uptr BlockSize = getSizeByClassId(I);
      DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
      DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
    }

    SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
    ScopedLock L1(Sci->Mutex);
    uptr TotalBlocks = 0;
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      if (LIKELY(!BG.Batches.empty())) {
        for (const auto &It : BG.Batches)
          TotalBlocks += It.getCount();
      } else {
        // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
        // recording itself, so count the block used by the `BatchGroup`.
        ++TotalBlocks;
      }
    }

    const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
    DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
              Sci->AllocatedUser / BlockSize);
    const uptr BlocksInUse =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
  }

  CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
    return static_cast<CompactPtrT>(Ptr);
  }

  void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
    return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
  }

  uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
    const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
    return CompactPtr & ~Mask;
  }
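
  // Illustrative example (assuming GroupSizeLog == 20, i.e. 1 MB groups, a
  // value picked for this sketch only): compactPtrGroupBase() masks off the
  // low GroupSizeLog bits, so 0x12345678 and 0x12349999 both map to the group
  // base 0x12300000, while 0x12400000 starts a different group.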

  uptr decompactGroupBase(uptr CompactPtrGroupBase) {
    return CompactPtrGroupBase;
  }

  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize < PageSize / 16U;
  }

  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
    const uptr PageSize = getPageSizeCached();
    return BlockSize > PageSize;
  }

  u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
                const u16 MaxBlockCount) {
    DCHECK_LT(ClassId, NumClasses);
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    ScopedLock L(Sci->Mutex);

    u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
    if (UNLIKELY(PopCount == 0)) {
      if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
        return 0U;
      PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
      DCHECK_NE(PopCount, 0U);
    }

    return PopCount;
  }
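
  // Illustrative use from a caller's perspective (hypothetical names and
  // values; the local cache is the usual caller):
  //   CompactPtrT Blocks[16];
  //   const u16 N = Allocator.popBlocks(&Cache, ClassId, Blocks,
  //                                     /*MaxBlockCount=*/16);
  //   // N == 0 only if the freelist was empty and populateFreeList() could
  //   // not map a new region.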

  // Push the array of free blocks to the designated batch group.
  void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
    DCHECK_LT(ClassId, NumClasses);

    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    if (ClassId == SizeClassMap::BatchClassId) {
      ScopedLock L(Sci->Mutex);
      pushBatchClassBlocks(Sci, Array, Size);
      return;
    }

    // TODO(chiahungduan): Consider not doing grouping if the group size is not
    // greater than the block size with a certain scale.

    // Sort the blocks so that blocks belonging to the same group can be pushed
    // together.
    bool SameGroup = true;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
        SameGroup = false;
      CompactPtrT Cur = Array[I];
      u32 J = I;
      while (J > 0 &&
             compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
        Array[J] = Array[J - 1];
        --J;
      }
      Array[J] = Cur;
    }

    ScopedLock L(Sci->Mutex);
    pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    // The BatchClassId must be locked last since other classes can use it.
    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
      if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
    }
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
    RegionsStashMutex.lock();
    ByteMapMutex.lock();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    ByteMapMutex.unlock();
    RegionsStashMutex.unlock();
    getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      getSizeClassInfo(I)->Mutex.unlock();
    }
  }

  template <typename F> void iterateOverBlocks(F Callback) {
    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      // TODO: The call of `iterateOverBlocks` requires disabling
      // SizeClassAllocator32. We may consider locking each region on demand
      // instead.
      Sci->Mutex.assertHeld();
      if (Sci->MinRegionIndex < MinRegionIndex)
        MinRegionIndex = Sci->MinRegionIndex;
      if (Sci->MaxRegionIndex > MaxRegionIndex)
        MaxRegionIndex = Sci->MaxRegionIndex;
    }

    // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
    ByteMapMutex.assertHeld();

    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
      if (PossibleRegions[I] &&
          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
        const uptr From = I * RegionSize;
        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
        for (uptr Block = From; Block < To; Block += BlockSize)
          Callback(Block);
      }
    }
  }

  void getStats(ScopedString *Str) {
    // TODO(kostyak): get the RSS per region.
    uptr TotalMapped = 0;
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalMapped += Sci->AllocatedUser;
      PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
      PushedBlocks += Sci->FreeListInfo.PushedBlocks;
    }
    Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
                "remains %zu\n",
                TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
    for (uptr I = 0; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getStats(Str, I, Sci);
    }
  }

  void getFragmentationInfo(ScopedString *Str) {
    Str->append(
        "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
        getPageSizeCached());

    for (uptr I = 1; I < NumClasses; I++) {
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      getSizeClassFragmentationInfo(Sci, I, Str);
    }
  }

  void getMemoryGroupFragmentationInfo(ScopedString *Str) {
    // Each region is also a memory group because the region size is the same
    // as the group size.
    getFragmentationInfo(Str);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    // Not supported by the Primary, but not an error either.
    return true;
  }

  uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
    // TODO: Once we have separate locks like primary64, we may consider using
    // tryLock() as well.
    ScopedLock L(Sci->Mutex);
    return releaseToOSMaybe(Sci, ClassId, ReleaseType);
  }

  uptr releaseToOS(ReleaseToOS ReleaseType) {
    uptr TotalReleasedBytes = 0;
    for (uptr I = 0; I < NumClasses; I++) {
      if (I == SizeClassMap::BatchClassId)
        continue;
      SizeClassInfo *Sci = getSizeClassInfo(I);
      ScopedLock L(Sci->Mutex);
      TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
    }
    return TotalReleasedBytes;
  }

  const char *getRegionInfoArrayAddress() const { return nullptr; }
  static uptr getRegionInfoArraySize() { return 0; }

  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
                                    UNUSED uptr Ptr) {
    return {};
  }

  AtomicOptions Options;

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  static const uptr RegionSize = 1UL << Config::getRegionSizeLog();
  static const uptr NumRegions =
      SCUDO_MMAP_RANGE_SIZE >> Config::getRegionSizeLog();
  static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
  typedef FlatByteMap<NumRegions> ByteMap;

  struct ReleaseToOsInfo {
    uptr BytesInFreeListAtLastCheckpoint;
    uptr RangesReleased;
    uptr LastReleasedBytes;
    u64 LastReleaseAtNs;
  };

  struct BlocksInfo {
    SinglyLinkedList<BatchGroupT> BlockList = {};
    uptr PoppedBlocks = 0;
    uptr PushedBlocks = 0;
  };

  struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
    HybridMutex Mutex;
    BlocksInfo FreeListInfo GUARDED_BY(Mutex);
    uptr CurrentRegion GUARDED_BY(Mutex);
    uptr CurrentRegionAllocated GUARDED_BY(Mutex);
    u32 RandState;
    uptr AllocatedUser GUARDED_BY(Mutex);
    // Lowest & highest region index allocated for this size class, to avoid
    // looping through the whole NumRegions.
    uptr MinRegionIndex GUARDED_BY(Mutex);
    uptr MaxRegionIndex GUARDED_BY(Mutex);
    ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
  };
  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");

  uptr computeRegionId(uptr Mem) {
    const uptr Id = Mem >> Config::getRegionSizeLog();
    CHECK_LT(Id, NumRegions);
    return Id;
  }

  uptr allocateRegionSlow() {
    uptr MapSize = 2 * RegionSize;
    const uptr MapBase = reinterpret_cast<uptr>(
        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
    if (!MapBase)
      return 0;
    const uptr MapEnd = MapBase + MapSize;
    uptr Region = MapBase;
    if (isAligned(Region, RegionSize)) {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
      else
        MapSize = RegionSize;
    } else {
      Region = roundUp(MapBase, RegionSize);
      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
      MapSize = RegionSize;
    }
    const uptr End = Region + MapSize;
    if (End != MapEnd)
      unmap(reinterpret_cast<void *>(End), MapEnd - End);

    DCHECK_EQ(Region % RegionSize, 0U);
    static_assert(Config::getRegionSizeLog() == GroupSizeLog,
                  "Memory group should be the same size as Region");

    return Region;
  }

  uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
    DCHECK_LT(ClassId, NumClasses);
    uptr Region = 0;
    {
      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions > 0)
        Region = RegionsStash[--NumberOfStashedRegions];
    }
    if (!Region)
      Region = allocateRegionSlow();
    if (LIKELY(Region)) {
      // Sci->Mutex is held by the caller, updating the Min/Max is safe.
      const uptr RegionIndex = computeRegionId(Region);
      if (RegionIndex < Sci->MinRegionIndex)
        Sci->MinRegionIndex = RegionIndex;
      if (RegionIndex > Sci->MaxRegionIndex)
        Sci->MaxRegionIndex = RegionIndex;
      ScopedLock L(ByteMapMutex);
      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
    }
    return Region;
  }

  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    return &SizeClassInfoArray[ClassId];
  }

  void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
      REQUIRES(Sci->Mutex) {
    DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));

    // Free blocks are recorded by TransferBatch in the freelist for all
    // size-classes. In addition, TransferBatch is allocated from BatchClassId.
    // In order not to use an additional block to record the free blocks in
    // BatchClassId, they are self-contained, i.e., a TransferBatch records the
    // block address of itself. See the figure below:
    //
    // TransferBatch at 0xABCD
    // +----------------------------+
    // | Free blocks' addr          |
    // | +------+------+------+     |
    // | |0xABCD|...   |...   |     |
    // | +------+------+------+     |
    // +----------------------------+
    //
    // When we allocate all the free blocks in the TransferBatch, the block
    // used by the TransferBatch is also free for use. We don't need to recycle
    // the TransferBatch. Note that the correctness is maintained by the
    // invariant,
    //
    //   Each popBlocks() request returns the entire TransferBatch. Returning
    //   part of the blocks in a TransferBatch is invalid.
    //
    // This ensures that the TransferBatch won't leak the address itself while
    // it's still holding other valid data.
    //
    // Besides, a BatchGroup is also allocated from BatchClassId and has its
    // address recorded in a TransferBatch too. To maintain the correctness,
    //
    //   The address of the BatchGroup is always recorded in the last
    //   TransferBatch in the freelist (which also implies that the freelist
    //   should only be updated with push_front). Once the last TransferBatch
    //   is popped, the block used by the BatchGroup is also free for use.
    //
    // With this approach, the blocks used by BatchGroup and TransferBatch are
    // reusable and don't need additional space for them.

    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();

    if (BG == nullptr) {
      // Construct `BatchGroup` on the last element.
      BG = reinterpret_cast<BatchGroupT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      --Size;
      BG->Batches.clear();
      // BatchClass hasn't enabled memory group. Use `0` to indicate there's no
      // memory group here.
      BG->CompactPtrGroupBase = 0;
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch =
          CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));

      Sci->FreeListInfo.BlockList.push_front(BG);
    }

    if (UNLIKELY(Size == 0))
      return;

    // This happens under 2 cases.
    // 1. just allocated a new `BatchGroup`.
    // 2. Only 1 block is pushed when the freelist is empty.
    if (BG->Batches.empty()) {
      // Construct the `TransferBatch` on the last element.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
          decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
      TB->clear();
      // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
      // recorded in the TransferBatch.
      TB->add(Array[Size - 1]);
      TB->add(
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
      --Size;
      BG->Batches.push_front(TB);
    }

    TransferBatchT *CurBatch = BG->Batches.front();
    DCHECK_NE(CurBatch, nullptr);

    for (u32 I = 0; I < Size;) {
      u16 UnusedSlots =
          static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
      if (UnusedSlots == 0) {
        CurBatch = reinterpret_cast<TransferBatchT *>(
            decompactPtr(SizeClassMap::BatchClassId, Array[I]));
        CurBatch->clear();
        CurBatch->add(Array[I]);
        ++I;
        // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
        // BatchClassId.
        BG->Batches.push_front(CurBatch);
        UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
      }
      // `UnusedSlots` is u16 so the result will also fit in u16.
      const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
      CurBatch->appendFromArray(&Array[I], AppendSize);
      I += AppendSize;
    }
  }
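
  // A hypothetical walk-through of the scheme above (illustrative only): when
  // three free BatchClass blocks are pushed onto an empty freelist, one block
  // is repurposed in place as the BatchGroup, another as the first
  // TransferBatch, and that TransferBatch records the remaining block plus the
  // addresses of the TransferBatch and the BatchGroup themselves, so no extra
  // memory is needed for the freelist metadata.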

  // Push the blocks to their batch group. The layout will be like,
  //
  // FreeListInfo.BlockList -> BG -> BG -> BG
  //
  // Each BatchGroup (BG) is associated with a unique group id and the free
  // blocks are managed by a list of TransferBatch (TB). To reduce the time of
  // inserting blocks, BGs are sorted and the input `Array` is supposed to be
  // sorted so that the sorted property is cheaper to maintain.
  // Use `SameGroup=true` to indicate that all blocks in the array are from the
  // same group; then we will skip checking the group id of each block.
  //
  // The region mutex needs to be held while calling this method.
  void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                      CompactPtrT *Array, u32 Size, bool SameGroup = false)
      REQUIRES(Sci->Mutex) {
    DCHECK_NE(ClassId, SizeClassMap::BatchClassId);

    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroupT *BG =
          reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
      BG->Batches.clear();
      TransferBatchT *TB =
          reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
      TB->clear();

      BG->CompactPtrGroupBase = CompactPtrGroupBase;
      BG->Batches.push_front(TB);
      BG->BytesInBGAtLastCheckpoint = 0;
      BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;

      return BG;
    };

    auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
      SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
      TransferBatchT *CurBatch = Batches.front();
      DCHECK_NE(CurBatch, nullptr);

      for (u32 I = 0; I < Size;) {
        DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
        u16 UnusedSlots =
            static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
        if (UnusedSlots == 0) {
          CurBatch =
              reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
          CurBatch->clear();
          Batches.push_front(CurBatch);
          UnusedSlots = BG->MaxCachedPerBatch;
        }
        // `UnusedSlots` is u16 so the result will also fit in u16.
        u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
        CurBatch->appendFromArray(&Array[I], AppendSize);
        I += AppendSize;
      }
    };

    Sci->FreeListInfo.PushedBlocks += Size;
    BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();

    // In the following, `Cur` always points to the BatchGroup for blocks that
    // will be pushed next. `Prev` is the element right before `Cur`.
    BatchGroupT *Prev = nullptr;

    while (Cur != nullptr &&
           compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
      Prev = Cur;
      Cur = Cur->Next;
    }

    if (Cur == nullptr ||
        compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
      Cur = CreateGroup(compactPtrGroupBase(Array[0]));
      if (Prev == nullptr)
        Sci->FreeListInfo.BlockList.push_front(Cur);
      else
        Sci->FreeListInfo.BlockList.insert(Prev, Cur);
    }

    // All the blocks are from the same group, just push without checking group
    // id.
    if (SameGroup) {
      for (u32 I = 0; I < Size; ++I)
        DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);

      InsertBlocks(Cur, Array, Size);
      return;
    }

    // The blocks are sorted by group id. Determine the segment of group and
    // push them to their group together.
    u32 Count = 1;
    for (u32 I = 1; I < Size; ++I) {
      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
        DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
        InsertBlocks(Cur, Array + I - Count, Count);

        while (Cur != nullptr &&
               compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }

        if (Cur == nullptr ||
            compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
          Cur = CreateGroup(compactPtrGroupBase(Array[I]));
          DCHECK_NE(Prev, nullptr);
          Sci->FreeListInfo.BlockList.insert(Prev, Cur);
        }

        Count = 1;
      } else {
        ++Count;
      }
    }

    InsertBlocks(Cur, Array + Size - Count, Count);
  }
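
  // For illustration (hypothetical input): a sorted Array whose blocks span
  // two groups, say [g0, g0, g0, g1, g1], results in two InsertBlocks() calls,
  // one per contiguous segment, each landing in (or creating) the BatchGroup
  // with the matching CompactPtrGroupBase.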

  u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                    CompactPtrT *ToArray, const u16 MaxBlockCount)
      REQUIRES(Sci->Mutex) {
    if (Sci->FreeListInfo.BlockList.empty())
      return 0U;

    SinglyLinkedList<TransferBatchT> &Batches =
        Sci->FreeListInfo.BlockList.front()->Batches;

    if (Batches.empty()) {
      DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
      BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
      Sci->FreeListInfo.BlockList.pop_front();

      // Block used by `BatchGroup` is from BatchClassId. Turn the block into
      // `TransferBatch` with single block.
      TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
      ToArray[0] =
          compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
      Sci->FreeListInfo.PoppedBlocks += 1;
      return 1U;
    }

    // So far, instead of always filling the blocks to `MaxBlockCount`, we only
    // examine a single `TransferBatch` to minimize the time spent on the
    // primary allocator. Besides, the sizes of `TransferBatch` and
    // `CacheT::getMaxCached()` may also impact the time spent on accessing the
    // primary allocator.
    // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
    // blocks and/or adjust the size of `TransferBatch` according to
    // `CacheT::getMaxCached()`.
    TransferBatchT *B = Batches.front();
    DCHECK_NE(B, nullptr);
    DCHECK_GT(B->getCount(), 0U);

    // BatchClassId should always take all blocks in the TransferBatch. Read
    // the comment in `pushBatchClassBlocks()` for more details.
    const u16 PopCount = ClassId == SizeClassMap::BatchClassId
                             ? B->getCount()
                             : Min(MaxBlockCount, B->getCount());
    B->moveNToArray(ToArray, PopCount);

    // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
    // done without holding `Mutex`.
    if (B->empty()) {
      Batches.pop_front();
      // A `TransferBatch` of BatchClassId is self-contained, no need to
      // deallocate. Read the comment in `pushBatchClassBlocks()` for more
      // details.
      if (ClassId != SizeClassMap::BatchClassId)
        C->deallocate(SizeClassMap::BatchClassId, B);

      if (Batches.empty()) {
        BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
        Sci->FreeListInfo.BlockList.pop_front();

        // We don't keep a BatchGroup with zero blocks to avoid empty-checking
        // while allocating. Note that the block used for constructing the
        // BatchGroup is recorded as a free block in the last element of
        // BatchGroup::Batches. Which means, once we pop the last
        // TransferBatch, the block is implicitly deallocated.
        if (ClassId != SizeClassMap::BatchClassId)
          C->deallocate(SizeClassMap::BatchClassId, BG);
      }
    }

    Sci->FreeListInfo.PoppedBlocks += PopCount;
    return PopCount;
  }

  NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    uptr Region;
    uptr Offset;
    // If the size-class currently has a region associated to it, use it. The
    // newly created blocks will be located after the currently allocated
    // memory for that region (up to RegionSize). Otherwise, create a new
    // region, where the new blocks will be carved from the beginning.
    if (Sci->CurrentRegion) {
      Region = Sci->CurrentRegion;
      DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
      Offset = Sci->CurrentRegionAllocated;
    } else {
      DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
      Region = allocateRegion(Sci, ClassId);
      if (UNLIKELY(!Region))
        return false;
      C->getStats().add(StatMapped, RegionSize);
      Sci->CurrentRegion = Region;
      Offset = 0;
    }

    const uptr Size = getSizeByClassId(ClassId);
    const u16 MaxCount = CacheT::getMaxCached(Size);
    DCHECK_GT(MaxCount, 0U);
    // The maximum number of blocks we should carve in the region is dictated
    // by the maximum number of batches we want to fill, and the amount of
    // memory left in the current region (we use the lowest of the two). This
    // will not be 0 as we ensure that a region can at least hold one block
    // (via static_assert and at the end of this function).
    const u32 NumberOfBlocks =
        Min(MaxNumBatches * MaxCount,
            static_cast<u32>((RegionSize - Offset) / Size));
    DCHECK_GT(NumberOfBlocks, 0U);

    constexpr u32 ShuffleArraySize =
        MaxNumBatches * TransferBatchT::MaxNumCached;
    // Fill the transfer batches and put them in the size-class freelist. We
    // need to randomize the blocks for security purposes, so we first fill a
    // local array that we then shuffle before populating the batches.
    CompactPtrT ShuffleArray[ShuffleArraySize];
    DCHECK_LE(NumberOfBlocks, ShuffleArraySize);

    uptr P = Region + Offset;
    for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
      ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);

    if (ClassId != SizeClassMap::BatchClassId) {
      u32 N = 1;
      uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
      for (u32 I = 1; I < NumberOfBlocks; I++) {
        if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
          shuffle(ShuffleArray + I - N, N, &Sci->RandState);
          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
                         /*SameGroup=*/true);
          CurGroup = compactPtrGroupBase(ShuffleArray[I]);
          N = 1;
        } else {
          ++N;
        }
      }

      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
                     /*SameGroup=*/true);
    } else {
      pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
    }

    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to only record
    // the requests from `PushBlocks` and `PopBatch` which are external
    // interfaces. `populateFreeList` is the internal interface so we should
    // set the values back to avoid incorrectly setting the stats.
    Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;

    const uptr AllocatedUser = Size * NumberOfBlocks;
    C->getStats().add(StatFree, AllocatedUser);
    DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
    // If there is not enough room in the region currently associated to fit
    // more blocks, we deassociate the region by resetting CurrentRegion and
    // CurrentRegionAllocated. Otherwise, update the allocated amount.
    if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
      Sci->CurrentRegion = 0;
      Sci->CurrentRegionAllocated = 0;
    } else {
      Sci->CurrentRegionAllocated += AllocatedUser;
    }
    Sci->AllocatedUser += AllocatedUser;

    return true;
  }
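
  // A rough sizing example for the above (illustrative numbers only): with
  // MaxNumBatches == 4 and a MaxCount of, say, 13 blocks per batch, at most 52
  // blocks are carved per refill, unless the space left in the current region
  // divided by the block size is smaller, in which case that remainder wins.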

  void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
      REQUIRES(Sci->Mutex) {
    if (Sci->AllocatedUser == 0)
      return;
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr InUse =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
    uptr PushedBytesDelta = 0;
    if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
      PushedBytesDelta =
          BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    }
    const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
    Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
                "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
                "latest pushed bytes: %6zuK\n",
                ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
                InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
                Sci->ReleaseInfo.LastReleasedBytes >> 10,
                PushedBytesDelta >> 10);
  }

  void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
                                     ScopedString *Str) REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);
    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };

    FragmentationRecorder Recorder;
    if (!Sci->FreeListInfo.BlockList.empty()) {
      PageReleaseContext Context =
          markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
                         ReleaseToOS::ForceAll);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    }

    const uptr PageSize = getPageSizeCached();
    const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
    const uptr InUseBlocks =
        Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
    uptr AllocatedPagesCount = 0;
    if (TotalBlocks != 0U) {
      for (uptr I = 0; I < NumberOfRegions; ++I) {
        if (SkipRegion(I))
          continue;
        AllocatedPagesCount += RegionSize / PageSize;
      }

      DCHECK_NE(AllocatedPagesCount, 0U);
    }

    DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
    const uptr InUsePages =
        AllocatedPagesCount - Recorder.getReleasedPagesCount();
    const uptr InUseBytes = InUsePages * PageSize;

    uptr Integral;
    uptr Fractional;
    computePercentage(BlockSize * InUseBlocks, InUseBytes, &Integral,
                      &Fractional);
    Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
                "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
                ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
                AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
  }

  NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                 ReleaseToOS ReleaseType = ReleaseToOS::Normal)
      REQUIRES(Sci->Mutex) {
    const uptr BlockSize = getSizeByClassId(ClassId);

    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
    const uptr BytesInFreeList =
        Sci->AllocatedUser -
        (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
            BlockSize;

    if (UNLIKELY(BytesInFreeList == 0))
      return 0;

    // ====================================================================== //
    // 1. Check if we have enough free blocks and if it's worth doing a page
    //    release.
    // ====================================================================== //
    if (ReleaseType != ReleaseToOS::ForceAll &&
        !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
                                 ReleaseType)) {
      return 0;
    }

    const uptr First = Sci->MinRegionIndex;
    const uptr Last = Sci->MaxRegionIndex;
    DCHECK_LE(First, Last);
    uptr TotalReleasedBytes = 0;
    const uptr Base = First * RegionSize;
    const uptr NumberOfRegions = Last - First + 1U;

    // ==================================================================== //
    // 2. Mark the free blocks so that we can tell which pages are in-use by
    //    querying `PageReleaseContext`.
    // ==================================================================== //
    PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
                                                NumberOfRegions, ReleaseType);
    if (!Context.hasBlockMarked())
      return 0;

    // ==================================================================== //
    // 3. Release the unused physical pages back to the OS.
    // ==================================================================== //
    ReleaseRecorder Recorder(Base);
    auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
      ScopedLock L(ByteMapMutex);
      return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
    };
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);

    if (Recorder.getReleasedRangesCount() > 0) {
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
    }
    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();

    return TotalReleasedBytes;
  }

  bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
                               uptr BytesInFreeList, ReleaseToOS ReleaseType)
      REQUIRES(Sci->Mutex) {
    DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
    const uptr PageSize = getPageSizeCached();

    if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
      Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;

    // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
    // so that we won't underestimate the releasable pages. For example, the
    // following is the region usage,
    //
    //  BytesInFreeListAtLastCheckpoint      AllocatedUser
    //                v                         v
    //  |--------------------------------------->
    //         ^                   ^
    //  BytesInFreeList     ReleaseThreshold
    //
    // In general, if we have collected enough bytes and the amount of free
    // bytes meets the ReleaseThreshold, we will try to do page release. If we
    // don't update `BytesInFreeListAtLastCheckpoint` when the current
    // `BytesInFreeList` is smaller, we may take a longer time to wait for
    // enough freed blocks because we miss the bytes between
    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
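
    // A numeric illustration of the point above (hypothetical numbers): with a
    // checkpoint of 512 KB and BytesInFreeList currently at 128 KB, keeping
    // the old checkpoint would require roughly 384 KB of additional frees
    // before any positive delta is visible; lowering the checkpoint to 128 KB
    // lets the very next frees count toward PushedBytesDelta.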

    const uptr PushedBytesDelta =
        BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
    if (PushedBytesDelta < PageSize)
      return false;

    // Releasing smaller blocks is expensive, so we want to make sure that a
    // significant amount of bytes are free, and that there has been a good
    // amount of batches pushed to the freelist before attempting to release.
    if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
      if (PushedBytesDelta < Sci->AllocatedUser / 16U)
        return false;

    if (ReleaseType == ReleaseToOS::Normal) {
      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
      if (IntervalMs < 0)
        return false;

      // The constant 8 here is selected from profiling some apps: the number
      // of unreleased pages in the large size classes is around 16 pages or
      // more. Choose half of it as a heuristic, which also avoids triggering a
      // page release for every pushBlocks() attempt by large blocks.
      const bool ByPassReleaseInterval =
          isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
      if (!ByPassReleaseInterval) {
        if (Sci->ReleaseInfo.LastReleaseAtNs +
                static_cast<u64>(IntervalMs) * 1000000 >
            getMonotonicTimeFast()) {
          // Memory was returned recently.
          return false;
        }
      }
    } // if (ReleaseType == ReleaseToOS::Normal)

    return true;
  }

  PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
                                    const uptr BlockSize, const uptr Base,
                                    const uptr NumberOfRegions,
                                    ReleaseToOS ReleaseType)
      REQUIRES(Sci->Mutex) {
    const uptr PageSize = getPageSizeCached();
    const uptr GroupSize = (1UL << GroupSizeLog);
    const uptr CurGroupBase =
        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));

    PageReleaseContext Context(BlockSize, NumberOfRegions,
                               /*ReleaseSize=*/RegionSize);

    auto DecompactPtr = [](CompactPtrT CompactPtr) {
      return reinterpret_cast<uptr>(CompactPtr);
    };
    for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
      const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
      // The `GroupSize` may not be divisible by `BlockSize`, which means there
      // is an unused space at the end of the Region. Exclude that space to
      // avoid an unused page map entry.
      uptr AllocatedGroupSize = GroupBase == CurGroupBase
                                    ? Sci->CurrentRegionAllocated
                                    : roundDownSlow(GroupSize, BlockSize);
      if (AllocatedGroupSize == 0)
        continue;

      // TransferBatches are pushed in front of BG.Batches. The first one may
      // not have all caches used.
      const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
                             BG.Batches.front()->getCount();
      const uptr BytesInBG = NumBlocks * BlockSize;

      if (ReleaseType != ReleaseToOS::ForceAll) {
        if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
          BG.BytesInBGAtLastCheckpoint = BytesInBG;
          continue;
        }

        const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
        if (PushedBytesDelta < PageSize)
          continue;

        // Given the randomness property, we try to release the pages only if
        // the bytes used by free blocks exceed a certain proportion of the
        // allocated space.
        if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
                                           (100U - 1U - BlockSize / 16U)) {
          continue;
        }
      }
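
      // Illustrative numbers for the small-block threshold above: with
      // BlockSize == 64 the cut-off is 100 - 1 - 64 / 16 == 95, so a group
      // with, say, 256 KB of allocated space is only considered once at least
      // ~95% of it (about 243 KB) sits in free blocks.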

      // TODO: Consider updating this after page release if `ReleaseRecorder`
      // can tell the released bytes in each group.
      BG.BytesInBGAtLastCheckpoint = BytesInBG;

      const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
      const uptr RegionIndex = (GroupBase - Base) / RegionSize;

      if (NumBlocks == MaxContainedBlocks) {
        for (const auto &It : BG.Batches)
          for (u16 I = 0; I < It.getCount(); ++I)
            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);

        const uptr To = GroupBase + AllocatedGroupSize;
        Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
                                      AllocatedGroupSize);
      } else {
        DCHECK_LT(NumBlocks, MaxContainedBlocks);

        // Note that we don't always visit blocks in each BatchGroup, so we
        // may miss the chance of releasing certain pages that cross
        // BatchGroups.
        Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
                                       RegionIndex, AllocatedGroupSize,
                                       /*MayContainLastBlockInRegion=*/true);
      }

      // We may not be able to do the page release in a rare case where we
      // fail on the PageMap allocation.
      if (UNLIKELY(!Context.hasBlockMarked()))
        break;
    }

    return Context;
  }
[NumClasses
] = {};
1155 HybridMutex ByteMapMutex
;
1156 // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
1157 ByteMap PossibleRegions
GUARDED_BY(ByteMapMutex
) = {};
1158 atomic_s32 ReleaseToOsIntervalMs
= {};
1159 // Unless several threads request regions simultaneously from different size
1160 // classes, the stash rarely contains more than 1 entry.
1161 static constexpr uptr MaxStashedRegions
= 4;
1162 HybridMutex RegionsStashMutex
;
1163 uptr NumberOfStashedRegions
GUARDED_BY(RegionsStashMutex
) = 0;
1164 uptr RegionsStash
[MaxStashedRegions
] GUARDED_BY(RegionsStashMutex
) = {};

} // namespace scudo

#endif // SCUDO_PRIMARY32_H_