//===-- release.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_RELEASE_H_
#define SCUDO_RELEASE_H_

#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "mutex.h"
#include "thread_annotations.h"

namespace scudo {

template <typename MemMapT> class RegionReleaseRecorder {
public:
  RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
      : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS. Note that `From`
  // and `To` are offsets from `Base` + Offset.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  MemMapT *RegionMemMap = nullptr;
  uptr Base = 0;
  // The release offset from Base. This is used when we know a given range
  // after Base will not be released.
  uptr Offset = 0;
};
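
// For illustration only (hypothetical values, not part of the interface):
// with Base = 0x10000 and Offset = 0x2000, a RegionReleaseRecorder receiving
// releasePageRangeToOS(0x0, 0x4000) hands [0x12000, 0x16000) to
// releasePagesToOS, since `From`/`To` are offsets from Base + Offset.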

class ReleaseRecorder {
public:
  ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
      : Base(Base), Offset(Offset), Data(Data) {}

  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }

  uptr getReleasedBytes() const { return ReleasedBytes; }

  uptr getBase() const { return Base; }

  // Releases the [From, To) range of pages back to the OS.
  void releasePageRangeToOS(uptr From, uptr To) {
    const uptr Size = To - From;
    releasePagesToOS(Base, From + Offset, Size, Data);
    ReleasedRangesCount++;
    ReleasedBytes += Size;
  }

private:
  uptr ReleasedRangesCount = 0;
  uptr ReleasedBytes = 0;
  // The starting address to release. Note that we may want to fold (Base +
  // Offset) into a single new Base. However, on Fuchsia the Base is retrieved
  // from `MapPlatformData`, which is not aware of the offset. Therefore,
  // store them separately so this works on all platforms.
  uptr Base = 0;
  // The release offset from Base. This is used when we know a given range
  // after Base will not be released.
  uptr Offset = 0;
  MapPlatformData *Data = nullptr;
};

class FragmentationRecorder {
public:
  FragmentationRecorder() = default;

  uptr getReleasedPagesCount() const { return ReleasedPagesCount; }

  void releasePageRangeToOS(uptr From, uptr To) {
    DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
    ReleasedPagesCount += (To - From) / getPageSizeCached();
  }

private:
  uptr ReleasedPagesCount = 0;
};
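
// Note: FragmentationRecorder exposes the same releasePageRangeToOS()
// interface as the recorders above but only counts pages; one plausible
// (hypothetical) use is passing it to releaseFreeMemoryToOS() below to
// estimate how many pages could be released without unmapping anything.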

// A buffer pool which holds a fixed number of static buffers of `uptr`
// elements for fast buffer allocation. If the request size is greater than
// `StaticBufferNumElements` or if all the static buffers are in use, it'll
// delegate the allocation to map().
template <uptr StaticBufferCount, uptr StaticBufferNumElements>
class BufferPool {
public:
  // Preserve 1 bit in the `Mask` so that we don't need to do a zero-check
  // while extracting the least significant set bit from the `Mask`.
  static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
  static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
                          SCUDO_CACHE_LINE_SIZE),
                "");

  struct Buffer {
    // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
    uptr *Data = nullptr;

    // The index of the underlying static buffer, or StaticBufferCount if this
    // buffer was dynamically allocated. This value is initially set to a
    // poison value to aid debugging.
    uptr BufferIndex = ~static_cast<uptr>(0);

    // Only valid if BufferIndex == StaticBufferCount.
    MemMapT MemMap = {};
  };

  // Return a zero-initialized buffer which can contain at least the given
  // number of elements, or a buffer with null `Data` on failure.
  Buffer getBuffer(const uptr NumElements) {
    if (UNLIKELY(NumElements > StaticBufferNumElements))
      return getDynamicBuffer(NumElements);

    uptr index;
    {
      // TODO: In general, we expect this operation should be fast so the
      // waiting thread won't be put to sleep. The HybridMutex does implement
      // busy-waiting, but we may want to review the performance and see if
      // we need an explicit spin lock here.
      ScopedLock L(Mutex);
      index = getLeastSignificantSetBitIndex(Mask);
      if (index < StaticBufferCount)
        Mask ^= static_cast<uptr>(1) << index;
    }

    if (index >= StaticBufferCount)
      return getDynamicBuffer(NumElements);

    Buffer Buf;
    Buf.Data = &RawBuffer[index * StaticBufferNumElements];
    Buf.BufferIndex = index;
    memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
    return Buf;
  }

  void releaseBuffer(Buffer Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    if (Buf.BufferIndex != StaticBufferCount) {
      ScopedLock L(Mutex);
      DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
      Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
    } else {
      Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
    }
  }

  bool isStaticBufferTestOnly(const Buffer &Buf) {
    DCHECK_NE(Buf.Data, nullptr);
    DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
    return Buf.BufferIndex != StaticBufferCount;
  }

private:
  Buffer getDynamicBuffer(const uptr NumElements) {
    // When using a heap-based buffer, precommit the pages backing the
    // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
    // where page fault exceptions are skipped as the allocated memory
    // is accessed. So far, this is only enabled on Fuchsia. It hasn't proven
    // a performance benefit on other platforms.
    const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
    const uptr MappedSize =
        roundUp(NumElements * sizeof(uptr), getPageSizeCached());
    Buffer Buf;
    if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
      Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
      Buf.BufferIndex = StaticBufferCount;
    }
    return Buf;
  }

  HybridMutex Mutex;
  // '1' means that the buffer index is not used. '0' means the buffer is in
  // use.
  uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
  uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
};
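
// For illustration only (hypothetical instantiation): in a BufferPool<2, 512>,
// `Mask` starts as all ones. getBuffer(512) picks the least significant set
// bit (index 0), clears it, and returns a zeroed &RawBuffer[0]; a second call
// returns &RawBuffer[512]. A third concurrent request, or any request for
// more than 512 elements, falls back to getDynamicBuffer(), and
// releaseBuffer() either sets the bit back or unmaps the dynamic mapping.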

// A Region page map is used to record the usage of pages in the regions. It
// implements a packed array of Counters. Each counter occupies 2^N bits,
// enough to store the counter's MaxValue. The ctor will try to use a static
// buffer first, and if that fails (the buffer is too small or already
// locked), will allocate the required Buffer via map(). The caller is
// expected to check whether the initialization was successful by checking
// the isAllocated() result. For performance's sake, none of the accessors
// check the validity of the arguments: it is assumed that Index is always in
// the [0, N) range and the value is not incremented past MaxValue.
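//
// A worked example (illustrative values, not requirements): with MaxValue =
// 3, each counter is rounded up to 2 bits, so on a 64-bit platform a single
// `uptr` element packs 32 counters (PackingRatio = 32, PackingRatioLog = 5,
// BitOffsetMask = 31, CounterMask = 0x3); counter I then lives in element
// I >> 5 at bit offset (I & 31) << 1.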
class RegionPageMap {
public:
  RegionPageMap()
      : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
        PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
        BufferNumElements(0) {}
  RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
    reset(NumberOfRegions, CountersPerRegion, MaxValue);
  }
  ~RegionPageMap() {
    if (!isAllocated())
      return;
    Buffers.releaseBuffer(Buffer);
    Buffer = {};
  }

  // The lock of `StaticBuffer` is acquired conditionally and there's no easy
  // way to specify the thread-safety attribute properly in the current code
  // structure. Besides, it's the only place we may want to check thread
  // safety. Therefore, it's fine to bypass the thread-safety analysis for now.
  void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
    DCHECK_GT(NumberOfRegion, 0);
    DCHECK_GT(CountersPerRegion, 0);
    DCHECK_GT(MaxValue, 0);

    Regions = NumberOfRegion;
    NumCounters = CountersPerRegion;

    constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
    // Rounding the counter storage size up to a power of two allows us to use
    // bit shifts to calculate a particular counter's Index and offset.
    const uptr CounterSizeBits =
        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
    DCHECK_LE(CounterSizeBits, MaxCounterBits);
    CounterSizeBitsLog = getLog2(CounterSizeBits);
    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);

    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
    DCHECK_GT(PackingRatio, 0);
    PackingRatioLog = getLog2(PackingRatio);
    BitOffsetMask = PackingRatio - 1;

    SizePerRegion =
        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
        PackingRatioLog;
    BufferNumElements = SizePerRegion * Regions;
    Buffer = Buffers.getBuffer(BufferNumElements);
  }
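
  // To make the arithmetic above concrete (illustrative values): with
  // MaxValue = 5 and CountersPerRegion = 100 on a 64-bit platform,
  // CounterSizeBits = roundUpPowerOfTwo(2 + 1) = 4, CounterMask = 0xf,
  // PackingRatio = 64 / 4 = 16, and SizePerRegion =
  // roundUp(100, 16) / 16 = 7 `uptr` elements per region.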

  bool isAllocated() const { return Buffer.Data != nullptr; }

  uptr getCount() const { return NumCounters; }

  uptr get(uptr Region, uptr I) const {
    DCHECK_LT(Region, Regions);
    DCHECK_LT(I, NumCounters);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
           CounterMask;
  }

  void inc(uptr Region, uptr I) const {
    DCHECK_LT(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
                                                   << BitOffset;
  }

  void incN(uptr Region, uptr I, uptr N) const {
    DCHECK_GT(N, 0U);
    DCHECK_LE(N, CounterMask);
    DCHECK_LE(get(Region, I), CounterMask - N);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    DCHECK_EQ(isAllCounted(Region, I), false);
    Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
  }

  void incRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      inc(Region, I);
  }

  // Set the counter to the max value. Note that the max number of blocks in a
  // page may vary. To provide an easier way to tell if all the blocks are
  // counted for different pages, set to the same max value to denote the
  // all-counted status.
  void setAsAllCounted(uptr Region, uptr I) const {
    DCHECK_LE(get(Region, I), CounterMask);
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
    Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
  }
  void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
    DCHECK_LE(From, To);
    const uptr Top = Min(To + 1, NumCounters);
    for (uptr I = From; I < Top; I++)
      setAsAllCounted(Region, I);
  }

  bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
    const uptr Count = get(Region, I);
    if (Count == CounterMask)
      return true;
    if (Count == MaxCount) {
      setAsAllCounted(Region, I);
      return true;
    }
    return false;
  }
  bool isAllCounted(uptr Region, uptr I) const {
    return get(Region, I) == CounterMask;
  }

  uptr getBufferNumElements() const { return BufferNumElements; }

private:
  // We may consider making this configurable if there are cases which may
  // benefit from this.
  static const uptr StaticBufferCount = 2U;
  static const uptr StaticBufferNumElements = 512U;
  using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
  static BufferPoolT Buffers;

  uptr Regions;
  uptr NumCounters;
  uptr CounterSizeBitsLog;
  uptr CounterMask;
  uptr PackingRatioLog;
  uptr BitOffsetMask;

  uptr SizePerRegion;
  uptr BufferNumElements;
  BufferPoolT::Buffer Buffer;
};
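
// A minimal usage sketch (hypothetical sizes, for illustration): a
// RegionPageMap tracking 1 region of 100 pages with at most 3 blocks per
// page would be set up as `RegionPageMap PageMap(1, 100, 3);` followed by an
// isAllocated() check, then driven via inc()/incRange() and queried with
// updateAsAllCountedIf()/isAllCounted().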

template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
  explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}

  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        CurrentRangeStatePage = CurrentPage;
        InRange = true;
      }
    } else {
      closeOpenedRange();
    }
    CurrentPage++;
  }

  void skipPages(uptr N) {
    closeOpenedRange();
    CurrentPage += N;
  }

  void finish() { closeOpenedRange(); }

private:
  void closeOpenedRange() {
    if (InRange) {
      Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
                                    (CurrentPage << PageSizeLog));
      InRange = false;
    }
  }

  ReleaseRecorderT &Recorder;
  const uptr PageSizeLog;
  bool InRange = false;
  uptr CurrentPage = 0;
  uptr CurrentRangeStatePage = 0;
};
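
// For illustration only: feeding processNextPage() the sequence true, true,
// false, true and then calling finish() reports two ranges,
// [0, 2 * PageSize) and [3 * PageSize, 4 * PageSize), to the recorder;
// adjacent released pages are coalesced into a single range.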

struct PageReleaseContext {
  PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
                     uptr ReleaseOffset = 0)
      : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
    PageSize = getPageSizeCached();
    if (BlockSize <= PageSize) {
      if (PageSize % BlockSize == 0) {
        // Same number of chunks per page, no cross overs.
        FullPagesBlockCountMax = PageSize / BlockSize;
        SameBlockCountPerPage = true;
      } else if (BlockSize % (PageSize % BlockSize) == 0) {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks, but all pages contain the same
        // number of chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 1;
        SameBlockCountPerPage = true;
      } else {
        // Some chunks are crossing page boundaries, which means that the page
        // contains one or two partial chunks.
        FullPagesBlockCountMax = PageSize / BlockSize + 2;
        SameBlockCountPerPage = false;
      }
    } else {
      if (BlockSize % PageSize == 0) {
        // One chunk covers multiple pages, no cross overs.
        FullPagesBlockCountMax = 1;
        SameBlockCountPerPage = true;
      } else {
        // One chunk covers multiple pages, and some chunks cross page
        // boundaries. Some pages contain one chunk, some contain two.
        FullPagesBlockCountMax = 2;
        SameBlockCountPerPage = false;
      }
    }
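
    // To make the classification concrete (illustrative values, assuming a
    // 4096-byte page): BlockSize = 32 divides the page evenly, so
    // FullPagesBlockCountMax = 128 with SameBlockCountPerPage = true;
    // BlockSize = 48 leaves a 16-byte remainder and 48 % 16 == 0, giving
    // 4096 / 48 + 1 = 86 with SameBlockCountPerPage still true; BlockSize =
    // 96 leaves a 64-byte remainder and 96 % 64 != 0, giving 42 + 2 = 44
    // with SameBlockCountPerPage = false.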

    // TODO: For multiple regions, it's more complicated to support partial
    // region marking (which includes the complexity of how to handle the last
    // block in a region). We may consider this after markFreeBlocks() accepts
    // only free blocks from the same region.
    if (NumberOfRegions != 1)
      DCHECK_EQ(ReleaseOffset, 0U);

    PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
    PageSizeLog = getLog2(PageSize);
    ReleasePageOffset = ReleaseOffset >> PageSizeLog;
  }

  // PageMap is lazily allocated when markFreeBlocks() is invoked.
  bool hasBlockMarked() const {
    return PageMap.isAllocated();
  }

  bool ensurePageMapAllocated() {
    if (PageMap.isAllocated())
      return true;
    PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
    // TODO: Log some message when we fail on PageMap allocation.
    return PageMap.isAllocated();
  }

  // Mark all the blocks in the given range [From, To). Instead of visiting
  // all the blocks, we will just mark the pages as all counted. Note that
  // `From` and `To` have to be page aligned, with one exception: if `To` is
  // equal to RegionSize, it doesn't have to be page aligned.
  bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
                             const uptr RegionIndex, const uptr RegionSize) {
    DCHECK_LT(From, To);
    DCHECK_LE(To, Base + RegionSize);
    DCHECK_EQ(From % PageSize, 0U);
    DCHECK_LE(To - From, RegionSize);

    if (!ensurePageMapAllocated())
      return false;

    uptr FromInRegion = From - Base;
    uptr ToInRegion = To - Base;
    uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);

    // The straddling block sits across the entire range.
    if (FirstBlockInRange >= ToInRegion)
      return true;

    // The first block may not sit at the first page in the range; move
    // `FromInRegion` to the first block's page.
    FromInRegion = roundDown(FirstBlockInRange, PageSize);

    // When the first block is not aligned to the range boundary, there is a
    // block sitting across `From`, which looks like,
    //
    //   From                                             To
    //     V                                               V
    //     +-----------------------------------------------+
    //  +-----+-----+-----+-----+
    //  |     |     |     |     | ...
    //  +-----+-----+-----+-----+
    //  |- first page -||- second page -||- ...
    //
    // Therefore, we can't just mark the first page as all counted. Instead,
    // we increment the number of blocks in the first page in the page map
    // and then round `From` up to the next page.
    if (FirstBlockInRange != FromInRegion) {
      DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
      uptr NumBlocksInFirstPage =
          (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
          BlockSize;
      PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
                   NumBlocksInFirstPage);
      FromInRegion = roundUp(FromInRegion + 1, PageSize);
    }

    uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);

    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
    // point because the range may contain only one block.

    // When the last block sits across `To`, we can't just mark the pages
    // occupied by the last block as all counted. Instead, we increment the
    // counters of those pages by 1. The exception is that if it's the last
    // block in the region, it's fine to mark those pages as all counted.
    if (LastBlockInRange + BlockSize != RegionSize) {
      DCHECK_EQ(ToInRegion % PageSize, 0U);
      // The case below looks like,
      //
      //   From                               To
      //     V                                 V
      //     +----------------------------------------+
      //  +-----+-----+-----+-----+
      //  |     |     |     |     | ...
      //  +-----+-----+-----+-----+
      //                  ... -||- last page -||- next page -|
      //
      // The last block is not aligned to `To`; we need to increment the
      // counter of `next page` by 1.
      if (LastBlockInRange + BlockSize != ToInRegion) {
        PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
                         getPageIndex(LastBlockInRange + BlockSize - 1));
      }
    } else {
      ToInRegion = RegionSize;
    }

    // After handling the first page and the last block, it's safe to mark
    // any page in between the range [From, To).
    if (FromInRegion < ToInRegion) {
      PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
                                   getPageIndex(ToInRegion - 1));
    }

    return true;
  }
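
  // A concrete trace (illustrative values, assuming a 4096-byte page,
  // BlockSize = 96, Base = 0, From = 4096, To = 12288, RegionSize = 65536):
  // FirstBlockInRange = roundUpSlow(4096, 96) = 4128, so the page at 4096
  // gets incN(..., 43) for the 43 in-range blocks overlapping it, and
  // FromInRegion moves to 8192. LastBlockInRange = 12192 ends exactly at
  // `To`, so only the page at 8192 is marked as all counted.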

  template <class TransferBatchT, typename DecompactPtrT>
  bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
                              DecompactPtrT DecompactPtr, const uptr Base,
                              const uptr RegionIndex, const uptr RegionSize,
                              bool MayContainLastBlockInRegion) {
    if (!ensurePageMapAllocated())
      return false;

    if (MayContainLastBlockInRegion) {
      const uptr LastBlockInRegion =
          ((RegionSize / BlockSize) - 1U) * BlockSize;
      // The last block in a region may not use the entire page, so we mark
      // the following "pretend" memory block(s) as free in advance.
      //
      //     Region Boundary
      //         v
      //  -----+-----------------------+
      //       |      Last Page        | <- Rounded Region Boundary
      //  -----+-----------------------+
      //   |-----||- trailing blocks -|
      //      ^
      //   last block
      const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
      const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
      // If the difference between `RoundedRegionSize` and
      // `TrailingBlockBase` is larger than a page, that implies the reported
      // `RegionSize` may not be accurate.
      DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);

      // Only the last page touched by the last block needs to mark the
      // trailing blocks. Note that if the last "pretend" block straddles the
      // boundary, we still have to count it in so that the logic of counting
      // the number of blocks on a page is consistent.
      uptr NumTrailingBlocks =
          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
           BlockSize - 1) /
          BlockSize;
      if (NumTrailingBlocks > 0) {
        PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
                     NumTrailingBlocks);
      }
    }

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
      // Each chunk affects one page only.
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          DCHECK_LT(PInRegion, RegionSize);
          PageMap.inc(RegionIndex, getPageIndex(PInRegion));
        }
      }
    } else {
      // In all other cases chunks might affect more than one page.
      DCHECK_GE(RegionSize, BlockSize);
      for (const auto &It : FreeList) {
        for (u16 I = 0; I < It.getCount(); I++) {
          const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
          PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
                           getPageIndex(PInRegion + BlockSize - 1));
        }
      }
    }

    return true;
  }

  uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
  uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }
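
  // For illustration only (hypothetical values): with a 4 KB page and
  // ReleasePageOffset = 2, getPageIndex(0x3000) is (0x3000 >> 12) - 2 = 1,
  // i.e., indices are relative to the start of the release window rather
  // than the region base.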

  uptr BlockSize;
  uptr NumberOfRegions;
  // For partial region marking, the pages in front of the release offset do
  // not need to be counted.
  uptr ReleasePageOffset;
  uptr PageSize;
  uptr PagesCount;
  uptr PageSizeLog;
  uptr FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
  RegionPageMap PageMap;
};

// Try to release the pages which don't have any in-use blocks, i.e., they
// are all free blocks. The `PageMap` records the number of free blocks in
// each page.
template <class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(PageReleaseContext &Context,
                      ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
  const uptr PageSize = Context.PageSize;
  const uptr BlockSize = Context.BlockSize;
  const uptr PagesCount = Context.PagesCount;
  const uptr NumberOfRegions = Context.NumberOfRegions;
  const uptr ReleasePageOffset = Context.ReleasePageOffset;
  const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
  const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
  RegionPageMap &PageMap = Context.PageMap;

  // Iterate over pages detecting ranges of pages with chunk Counters equal
  // to the expected number of chunks for the particular page.
  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
  if (SameBlockCountPerPage) {
    // Fast path, every page has the same number of chunks affecting it.
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  } else {
    // Slow path, go through the pages keeping count of how many chunks
    // affect each page.
    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
    const uptr Pnc = Pn * BlockSize;
    // The idea is to increment the current page pointer by the first chunk
    // size, the middle portion size (the portion of the page covered by
    // chunks except the first and the last one) and then the last chunk
    // size, adding up the number of chunks on the current page and checking
    // on every step whether the page boundary was crossed.
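    //
    // A worked example of that walk (illustrative values, assuming a
    // 4096-byte page and BlockSize = 96): Pn = 42 and Pnc = 4032. On the
    // first page, CurrentBoundary starts at 0, advances by Pnc to 4032,
    // which is still below the 4096 page boundary, so one extra straddling
    // block is counted (BlocksPerPage = 43) and CurrentBoundary moves to
    // 4128 for the next page.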
    for (uptr I = 0; I < NumberOfRegions; I++) {
      if (SkipRegion(I)) {
        RangeTracker.skipPages(PagesCount);
        continue;
      }
      uptr PrevPageBoundary = 0;
      uptr CurrentBoundary = 0;
      if (ReleasePageOffset > 0) {
        PrevPageBoundary = ReleasePageOffset * PageSize;
        CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
      }
      for (uptr J = 0; J < PagesCount; J++) {
        const uptr PageBoundary = PrevPageBoundary + PageSize;
        uptr BlocksPerPage = Pn;
        if (CurrentBoundary < PageBoundary) {
          if (CurrentBoundary > PrevPageBoundary)
            BlocksPerPage++;
          CurrentBoundary += Pnc;
          if (CurrentBoundary < PageBoundary) {
            BlocksPerPage++;
            CurrentBoundary += BlockSize;
          }
        }
        PrevPageBoundary = PageBoundary;
        const bool CanRelease =
            PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
        RangeTracker.processNextPage(CanRelease);
      }
    }
  }
  RangeTracker.finish();
}