//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "mutex.h"
#include "thread_annotations.h"

namespace scudo {

template <typename MemMapT> class RegionReleaseRecorder {
public:
  RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
      : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS. Note that `From`
  // and `To` are offsets from `Base` + `Offset`.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }
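
  // Illustrative example (not part of the interface): with Base = 0x200000
  // and Offset = 0x1000, releasePageRangeToOS(0, 0x2000) releases the two
  // pages spanning [0x201000, 0x203000), i.e. the range
  // [Base + Offset + From, Base + Offset + To).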

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  MemMapT *RegionMemMap = nullptr;
  uptr Base = 0;
  // The release offset from `Base`. This is used when we know that a given
  // range after `Base` will not be released.
  uptr Offset = 0;
};

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
      : Base(Base), Offset(Offset), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From + Offset, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  // The starting address to release. Note that we may want to combine
  // (Base + Offset) into a new Base. However, on Fuchsia the Base is
  // retrieved from `MapPlatformData`, which is not aware of the offset.
  // Therefore, store them separately so that this works on all platforms.
  uptr Base = 0;
  // The release offset from `Base`. This is used when we know that a given
  // range after `Base` will not be released.
  uptr Offset = 0;
  MapPlatformData *Data = nullptr;
};

class FragmentationRecorder {
public:
  FragmentationRecorder() = default;

  uptr getReleasedPagesCount() const { return ReleasedPagesCount; }

  void releasePageRangeToOS(uptr From, uptr To) {
    DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
    ReleasedPagesCount += (To - From) >> getPageSizeLogCached();
  }

private:
  uptr ReleasedPagesCount = 0;
};

template <uptr GroupSize, uptr NumGroups>
class MemoryGroupFragmentationRecorder {
public:
  const uptr NumPagesInOneGroup = GroupSize / getPageSizeCached();

  void releasePageRangeToOS(uptr From, uptr To) {
    for (uptr I = From / getPageSizeCached(); I < To / getPageSizeCached();
         ++I)
      ++FreePagesCount[I / NumPagesInOneGroup];
  }

  uptr getNumFreePages(uptr GroupId) { return FreePagesCount[GroupId]; }

private:
  uptr FreePagesCount[NumGroups] = {};
};
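
// Illustrative example (not part of the interface): with a 4 KiB page size
// and GroupSize = 16 KiB, NumPagesInOneGroup is 4, so releasing the page
// range [0x4000, 0x8000) increments FreePagesCount[1] once per page, i.e.
// four times.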

// A buffer pool which holds a fixed number of static buffers of `uptr`
// elements for fast buffer allocation. If the request size is greater than
// `StaticBufferNumElements` or if all the static buffers are in use, it'll
// delegate the allocation to map().
template <uptr StaticBufferCount, uptr StaticBufferNumElements>
class BufferPool {
public:
  // Preserve 1 bit in the `Mask` so that we don't need to do a zero-check
  // while extracting the least significant set bit from the `Mask`.
  static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
  static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
                          SCUDO_CACHE_LINE_SIZE),
                "");

  struct Buffer {
    // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
    uptr *Data = nullptr;

    // The index of the underlying static buffer, or `StaticBufferCount` if
    // this buffer was dynamically allocated. This value is initially set to
    // a poison value to aid debugging.
    uptr BufferIndex = ~static_cast<uptr>(0);

    // Only valid if `BufferIndex` == `StaticBufferCount`.
    MemMapT MemMap = {};
  };

  // Returns a zero-initialized buffer which can contain at least the given
  // number of elements, or a buffer with null `Data` on failure.
  Buffer getBuffer(const uptr NumElements) {
    if (UNLIKELY(NumElements > StaticBufferNumElements))
      return getDynamicBuffer(NumElements);

    uptr index;
    {
      // TODO: In general, we expect this operation should be fast so the
      // waiting thread won't be put to sleep. The HybridMutex does implement
      // busy-waiting, but we may want to review the performance and see
      // whether we need an explicit spin lock here.
      ScopedLock L(Mutex);
      index = getLeastSignificantSetBitIndex(Mask);
      if (index < StaticBufferCount)
        Mask ^= static_cast<uptr>(1) << index;
    }

    if (index >= StaticBufferCount)
      return getDynamicBuffer(NumElements);

    Buffer Buf;
    Buf.Data = &RawBuffer[index * StaticBufferNumElements];
    Buf.BufferIndex = index;
    memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
    return Buf;
  }

  void releaseBuffer(Buffer Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    if (Buf.BufferIndex != StaticBufferCount) {
      ScopedLock L(Mutex);
      DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
      Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
    } else {
      Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
    }
  }

  bool isStaticBufferTestOnly(const Buffer &Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    return Buf.BufferIndex != StaticBufferCount;
  }

private:
  Buffer getDynamicBuffer(const uptr NumElements) {
    // When using a heap-based buffer, precommit the pages backing the
    // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
    // where page-fault exceptions are skipped as the allocated memory
    // is accessed. So far, this is only enabled on Fuchsia; it hasn't proven
    // a performance benefit on other platforms.
    const uptr MmapFlags =
        MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
    const uptr MappedSize =
        roundUp(NumElements * sizeof(uptr), getPageSizeCached());
    Buffer Buf;
    if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
      Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
      Buf.BufferIndex = StaticBufferCount;
    }
    return Buf;
  }

  HybridMutex Mutex;
  // '1' means the buffer at that index is free; '0' means it is in use.
  uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
  uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
};
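
// Illustrative usage sketch (hypothetical values, not part of this file):
//
//   BufferPool<2, 512> Pool;
//   BufferPool<2, 512>::Buffer Buf = Pool.getBuffer(/*NumElements=*/128);
//   if (Buf.Data != nullptr) {
//     // ... use Buf.Data[0] through Buf.Data[127] ...
//     Pool.releaseBuffer(Buf);
//   }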

// A RegionPageMap is used to record the usage of pages in the regions. It
// implements a packed array of Counters. Each counter occupies 2^N bits,
// enough to store the counter's MaxValue. The ctor will try to use a static
// buffer first, and if that fails (the buffer is too small or already
// locked), it will allocate the required buffer via map(). The caller is
// expected to check whether the initialization was successful by checking
// the isAllocated() result. For performance's sake, none of the accessors
// check the validity of the arguments: it is assumed that Index is always
// in the [0, N) range and the value is not incremented past MaxValue.
class RegionPageMap {
public:
  RegionPageMap()
      : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
        PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
        BufferNumElements(0) {}
  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
    reset(NumberOfRegions, CountersPerRegion, MaxValue);
  }

  ~RegionPageMap() {
    if (!isAllocated())
      return;
    Buffers.releaseBuffer(Buffer);
    Buffer = {};
  }

  // The lock of `StaticBuffer` is acquired conditionally and there's no easy
  // way to specify the thread-safety attribute properly in the current code
  // structure. Besides, it's the only place we may want to check thread
  // safety. Therefore, it's fine to bypass the thread-safety analysis now.
  void reset(uptr NumberOfRegion, uptr CountersPerRegion,
             uptr MaxValue) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_GT(NumberOfRegion, 0);
    DCHECK_GT(CountersPerRegion, 0);
    DCHECK_GT(MaxValue, 0);

    Regions = NumberOfRegion;
    NumCounters = CountersPerRegion;

    constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
    // Rounding the counter storage size up to a power of two allows using
    // bit shifts to calculate a particular counter's index and offset.
    const uptr CounterSizeBits =
        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;
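
    // Worked example (illustrative): with MaxValue = 3, CounterSizeBits is
    // roundUpPowerOfTwo(getMostSignificantSetBitIndex(3) + 1) = 2, so
    // CounterMask = 0x3. On a 64-bit target, PackingRatio = 64 / 2 = 32
    // counters per `uptr` word, PackingRatioLog = 5 and BitOffsetMask = 31.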

    SizePerRegion =
        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferNumElements = SizePerRegion * Regions;
    Buffer = Buffers.getBuffer(BufferNumElements);
  }

  bool isAllocated() const { return Buffer.Data != nullptr; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
           CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                                   << BitOffset;
  }

  void incN(uptr Region, uptr I, uptr N) const {
    DCHECK_GT(N, 0U);
    DCHECK_LE(N, CounterMask);
    DCHECK_LE(get(Region, I), CounterMask - N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  // Set the counter to its max value. Note that the max number of blocks in
  // a page may vary. To provide an easier way to tell whether all the blocks
  // are counted for different pages, set them to the same max value to
  // denote the all-counted status.
  void setAsAllCounted(uptr Region, uptr I) const {
    DCHECK_LE(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
  }

  void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      setAsAllCounted(Region, I);
  }

  bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
    const uptr Count = get(Region, I);
    if (Count == CounterMask)
      return true;
    if (Count == MaxCount) {
      setAsAllCounted(Region, I);
      return true;
    }
    return false;
  }

  bool isAllCounted(uptr Region, uptr I) const {
    return get(Region, I) == CounterMask;
  }

  uptr getBufferNumElements() const { return BufferNumElements; }

private:
  // We may consider making this configurable if there are cases which may
  // benefit from this.
  static const uptr StaticBufferCount = 2U;
  static const uptr StaticBufferNumElements = 512U;
  using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
  static BufferPoolT Buffers;

  uptr Regions;
  uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferNumElements;
  BufferPoolT::Buffer Buffer;
};
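
// Illustrative usage sketch (hypothetical values, not part of this file):
//
//   RegionPageMap Map(/*NumberOfRegions=*/1, /*CountersPerRegion=*/NumPages,
//                     /*MaxValue=*/FullPagesBlockCountMax);
//   if (Map.isAllocated()) {
//     Map.inc(/*Region=*/0, /*I=*/PageIndex);
//     if (Map.updateAsAllCountedIf(0, PageIndex, FullPagesBlockCountMax)) {
//       // Every block on this page is free; the page may be released.
//     }
//   }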

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
      : Recorder(Recorder) {}

  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      const uptr PageSizeLog = getPageSizeLogCached();
      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                    (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT &Recorder;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
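
// Illustrative usage sketch (not part of the interface): consecutive pages
// reported as released are coalesced into one releasePageRangeToOS() call.
//
//   FreePagesRangeTracker<ReleaseRecorder> Tracker(Recorder);
//   Tracker.processNextPage(true);   // Page 0: free, a range opens.
//   Tracker.processNextPage(true);   // Page 1: free, the range grows.
//   Tracker.processNextPage(false);  // Page 2: in use, releases pages [0, 2).
//   Tracker.finish();                // Flushes any still-open range.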

struct PageReleaseContext {
  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
                     uptr ReleaseOffset = 0)
      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
    const uptr PageSize = getPageSizeCached();
    if (BlockSize <= PageSize) {
      if (PageSize % BlockSize == 0) {
        // Same number of chunks per page, no cross overs.
        FullPagesBlockCountMax = PageSize / BlockSize;
        SameBlockCountPerPage = true;
      } else if (BlockSize % (PageSize % BlockSize) == 0) {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks, but all pages contain the same
        // number of chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 1;
        SameBlockCountPerPage = true;
      } else {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 2;
        SameBlockCountPerPage = false;
      }
    } else {
      if ((BlockSize & (PageSize - 1)) == 0) {
        // One chunk covers multiple pages, no cross overs.
        FullPagesBlockCountMax = 1;
        SameBlockCountPerPage = true;
      } else {
        // One chunk covers multiple pages, and some chunks are crossing page
        // boundaries. Some pages contain one chunk, some contain two.
        FullPagesBlockCountMax = 2;
        SameBlockCountPerPage = false;
      }
    }

    // TODO: For multiple regions, it's more complicated to support partial
    // region marking (which includes the complexity of how to handle the last
    // block in a region). We may consider this after markFreeBlocks() accepts
    // only free blocks from the same region.
    if (NumberOfRegions != 1)
      DCHECK_EQ(ReleaseOffset, 0U);

    const uptr PageSizeLog = getPageSizeLogCached();
    PagesCount = roundUp(ReleaseSize, PageSize) >> PageSizeLog;
    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
  }
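
  // Worked example (illustrative): with a 4096-byte page and BlockSize = 48,
  // PageSize % BlockSize == 16 and 48 % 16 == 0, so every page holds the
  // same count of 4096 / 48 + 1 == 86 full or partial blocks. With
  // BlockSize = 96, 4096 % 96 == 64 and 96 % 64 != 0, so the per-page block
  // count varies, with at most 4096 / 96 + 2 == 44 blocks on a page.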

  // PageMap is lazily allocated when markFreeBlocks() is invoked.
  bool hasBlockMarked() const {
    return PageMap.isAllocated();
  }

  bool ensurePageMapAllocated() {
    if (PageMap.isAllocated())
      return true;
    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
    // TODO: Log some message when we fail on PageMap allocation.
    return PageMap.isAllocated();
  }

  // Mark all the blocks in the given range [From, To). Instead of visiting
  // all the blocks, we will just mark the page as all counted. Note that
  // `From` and `To` have to be page aligned, with one exception: if `To` is
  // equal to RegionSize, it doesn't need to be page aligned.
  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
                             const uptr RegionIndex, const uptr RegionSize) {
    const uptr PageSize = getPageSizeCached();
    DCHECK_LT(From, To);
    DCHECK_LE(To, Base + RegionSize);
    DCHECK_EQ(From % PageSize, 0U);
    DCHECK_LE(To - From, RegionSize);

    if (!ensurePageMapAllocated())
      return false;

    uptr FromInRegion = From - Base;
    uptr ToInRegion = To - Base;
    uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);

    // The straddling block sits across the entire range.
    if (FirstBlockInRange >= ToInRegion)
      return true;

    // The first block may not start at the first page in the range; move
    // `FromInRegion` to the first block's page.
    FromInRegion = roundDown(FirstBlockInRange, PageSize);

    // When the first block is not aligned to the range boundary, which means
    // there is a block sitting across `From`, it looks like,
    //
    //   From                                             To
    //     V                                               V
    //     +-----------------------------------------------+
    //  +-----+-----+-----+-----+
    //  |     |     |     |     |
    //  +-----+-----+-----+-----+
    //     |- first page -||- second page -||- ...
    //
    // Therefore, we can't just mark the first page as all counted. Instead,
    // we increment the number of blocks in the first page in the page map
    // and then round `From` up to the next page.
    if (FirstBlockInRange != FromInRegion) {
      DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
      uptr NumBlocksInFirstPage =
          (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
          BlockSize;
      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
                   NumBlocksInFirstPage);
      FromInRegion = roundUp(FromInRegion + 1, PageSize);
    }

    uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);

    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
    // point because the range may contain only one block.

    // When the last block sits across `To`, we can't just mark the pages
    // occupied by the last block as all counted. Instead, we increment the
    // counters of those pages by 1. The exception is that if it's the last
    // block in the region, it's fine to mark those pages as all counted.
    if (LastBlockInRange + BlockSize != RegionSize) {
      DCHECK_EQ(ToInRegion % PageSize, 0U);
      // The case below is like,
      //
      //   From                                      To
      //     V                                        V
      //     +----------------------------------------+
      //                          +-----+-----+-----+-----+
      //                          |     |     |     |     |
      //                          +-----+-----+-----+-----+
      //                            ... -||- last page -||- next page -|
      //
      // The last block is not aligned to `To`; we need to increment the
      // counter of the `next page` by 1.
      if (LastBlockInRange + BlockSize != ToInRegion) {
        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
                         getPageIndex(LastBlockInRange + BlockSize - 1));
      }
    } else {
      ToInRegion = RegionSize;
    }

    // After handling the first page and the last block, it's safe to mark
    // any page in between the range [From, To) as all counted.
    if (FromInRegion < ToInRegion) {
      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
                                   getPageIndex(ToInRegion - 1));
    }

    return true;
  }
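
  // Worked example (illustrative): with PageSize = 4096, BlockSize = 96,
  // Base = 0, From = 4096 and To = 12288, FirstBlockInRange is
  // roundUpSlow(4096, 96) == 4128, so a block straddles `From`: the first
  // page only gets its counter incremented by the number of blocks touching
  // it, while the pages fully covered by blocks inside the range are marked
  // as all counted directly.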

  template <class TransferBatchT, typename DecompactPtrT>
  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
                              DecompactPtrT DecompactPtr, const uptr Base,
                              const uptr RegionIndex, const uptr RegionSize,
                              bool MayContainLastBlockInRegion) {
    if (!ensurePageMapAllocated())
      return false;

    const uptr PageSize = getPageSizeCached();
    if (MayContainLastBlockInRegion) {
      const uptr LastBlockInRegion =
          ((RegionSize / BlockSize) - 1U) * BlockSize;
      // The last block in a region may not use the entire page, so we mark
      // the following "pretend" memory block(s) as free in advance.
      //
      //         Region Boundary
      //               v
      //  -----+-----------------------+
      //       |      Last Page        | <- Rounded Region Boundary
      //  -----+-----------------------+
      //   |-----||- trailing blocks -|
      //      ^
      //   last block
      //
      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
      const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
      // If the difference between `RoundedRegionSize` and
      // `TrailingBlockBase` is larger than a page, that implies the reported
      // `RegionSize` may not be accurate.
      DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);

      // Only the last page touched by the last block needs to mark the
      // trailing blocks. Note that if the last "pretend" block straddles the
      // boundary, we still have to count it in so that the logic of counting
      // the number of blocks on a page is consistent.
      uptr NumTrailingBlocks =
          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
           BlockSize - 1) /
          BlockSize;
      if (NumTrailingBlocks > 0) {
        PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
                     NumTrailingBlocks);
      }
    }

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
      // Each chunk affects one page only.
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          DCHECK_LT(PInRegion, RegionSize);
          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
        }
      }
    } else {
      // In all other cases chunks might affect more than one page.
      DCHECK_GE(RegionSize, BlockSize);
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                           getPageIndex(PInRegion + BlockSize - 1));
        }
      }
    }

    return true;
  }

  uptr getPageIndex(uptr P) {
    return (P >> getPageSizeLogCached()) - ReleasePageOffset;
  }
  uptr getReleaseOffset() {
    return ReleasePageOffset << getPageSizeLogCached();
  }

  uptr BlockSize;
  uptr NumberOfRegions;
  // For partial region marking, the pages in front of the release offset do
  // not need to be marked.
  uptr ReleasePageOffset;
  uptr PagesCount;
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  RegionPageMap PageMap;
};

// Try to release the pages which don't have any in-use blocks, i.e., they
// contain only free blocks. The `PageMap` will record the number of free
// blocks in each page.
template <class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(PageReleaseContext &Context,
                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = getPageSizeCached();
  const uptr BlockSize = Context.BlockSize;
  const uptr PagesCount = Context.PagesCount;
  const uptr NumberOfRegions = Context.NumberOfRegions;
  const uptr ReleasePageOffset = Context.ReleasePageOffset;
  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
  RegionPageMap &PageMap = Context.PageMap;

  // Iterate over pages, detecting ranges of pages with chunk counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks
    // affect each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, the middle portion size (the portion of the page covered by
    // chunks except the first and the last one) and then the last chunk
    // size, adding up the number of chunks on the current page and checking
    // on every step whether the page boundary was crossed.
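    // Worked example (illustrative): with PageSize = 4096 and BlockSize =
    // 96, Pn == 42 and Pnc == 4032. Depending on how the straddling chunks
    // fall, a page is counted as covering Pn, Pn + 1, or Pn + 2 chunks,
    // matching FullPagesBlockCountMax == 4096 / 96 + 2 == 44.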
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      if (ReleasePageOffset > 0) {
        PrevPageBoundary = ReleasePageOffset << getPageSizeLogCached();
        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
      }
= 0; J
< PagesCount
; J
++) {
698 const uptr PageBoundary
= PrevPageBoundary
+ PageSize
;
699 uptr BlocksPerPage
= Pn
;
700 if (CurrentBoundary
< PageBoundary
) {
701 if (CurrentBoundary
> PrevPageBoundary
)
703 CurrentBoundary
+= Pnc
;
704 if (CurrentBoundary
< PageBoundary
) {
706 CurrentBoundary
+= BlockSize
;
709 PrevPageBoundary
= PageBoundary
;
710 const bool CanRelease
=
711 PageMap
.updateAsAllCountedIf(I
, J
, BlocksPerPage
);
712 RangeTracker
.processNextPage(CanRelease
);
716 RangeTracker
.finish();
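
// Illustrative end-to-end sketch (hypothetical identifiers; the real callers
// live in the primary/secondary allocators):
//
//   PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1, ReleaseSize);
//   Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Base,
//                                  /*RegionIndex=*/0, RegionSize,
//                                  /*MayContainLastBlockInRegion=*/true);
//   ReleaseRecorder Recorder(Base);
//   releaseFreeMemoryToOS(Context, Recorder,
//                         [](uptr RegionIndex) { return false; });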

} // namespace scudo

#endif // SCUDO_RELEASE_H_