//===-- release_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "list.h"
#include "release.h"
#include "size_class_map.h"

#include <string.h>

#include <algorithm>
#include <memory>
#include <random>
#include <set>
#include <string>
#include <vector>

TEST(ScudoReleaseTest, RegionPageMap) {
  for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
    // Various valid counter's max values packed into one word.
    scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
    ASSERT_TRUE(PageMap2N.isAllocated());
    EXPECT_EQ(1U, PageMap2N.getBufferNumElements());
    // Check the "all bit set" values too.
    scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
    ASSERT_TRUE(PageMap2N1_1.isAllocated());
    EXPECT_EQ(1U, PageMap2N1_1.getBufferNumElements());
    // Verify the packing ratio: the counter is expected to be packed into the
    // closest power-of-2 number of bits.
    scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
    ASSERT_TRUE(PageMap.isAllocated());
    EXPECT_EQ(scudo::roundUpPowerOfTwo(I + 1), PageMap.getBufferNumElements());
  }
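
  // Worked example for the packing check above (illustration only, assuming a
  // 64-bit word, i.e. SCUDO_WORDSIZE == 64): with I == 5 the max value
  // 1UL << 5 needs 6 bits, which gets packed into the closest power of 2,
  // i.e. 8 bits per counter. 64 counters * 8 bits = 512 bits = 8 words,
  // matching roundUpPowerOfTwo(5 + 1) == 8 buffer elements.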

  // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::RegionPageMap PageMap(1U, NumCounters, 1UL << ((1UL << I) - 1));
    ASSERT_TRUE(PageMap.isAllocated());
    PageMap.inc(0U, 0U);
    for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.inc(0U, C);
      EXPECT_EQ(1UL, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(0UL, PageMap.get(0U, NumCounters - 1));
    PageMap.inc(0U, NumCounters - 1);
    if (I > 0) {
      PageMap.incRange(0U, 0U, NumCounters - 1);
      for (scudo::uptr C = 0; C < NumCounters; C++)
        EXPECT_EQ(2UL, PageMap.get(0U, C));
    }
  }
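
  // Worked example for the one-page-buffer claim above (illustration only,
  // assuming 4 KiB pages and SCUDO_WORDSIZE == 64): at I == 0 the loop
  // requests (4096 / 8) * 64 == 32768 counters of 1 bit each, i.e. 32768 bits
  // == 4096 bytes, exactly one page. The same product holds for every I, since
  // halving the counter count doubles the bits per counter.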

  // Similar to the above, except that we are using incN().
  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
    // Make sure counters request one memory page for the buffer.
    const scudo::uptr NumCounters =
        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
    scudo::uptr MaxValue = 1UL << ((1UL << I) - 1);
    if (MaxValue <= 1U)
      continue;

    scudo::RegionPageMap PageMap(1U, NumCounters, MaxValue);

    scudo::uptr N = MaxValue / 2;
    PageMap.incN(0U, 0, N);
    for (scudo::uptr C = 1; C < NumCounters; C++) {
      EXPECT_EQ(0UL, PageMap.get(0U, C));
      PageMap.incN(0U, C, N);
      EXPECT_EQ(N, PageMap.get(0U, C - 1));
    }
    EXPECT_EQ(N, PageMap.get(0U, NumCounters - 1));
  }
}

class StringRangeRecorder {
public:
  std::string ReportedPages;

  StringRangeRecorder()
      : PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    From >>= PageSizeScaledLog;
    To >>= PageSizeScaledLog;
    if (!ReportedPages.empty())
      EXPECT_LT(LastPageReported, From);
    ReportedPages.append(From - LastPageReported, '.');
    ReportedPages.append(To - From, 'x');
    LastPageReported = To;
  }

private:
  const scudo::uptr PageSizeScaledLog;
  scudo::uptr LastPageReported = 0;
};
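
// Worked example (illustration only, assuming 4 KiB pages): starting from an
// empty recorder, releasePageRangeToOS(0x2000, 0x5000) appends ".." for the
// two unreleased pages before the range and "xxx" for the three released
// pages, yielding "..xxx" with LastPageReported == 5.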

TEST(ScudoReleaseTest, FreePagesRangeTracker) {
  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
  const char *TestCases[] = {
      "..............xxxxx",
      "xxxxxxxxxxxxxxxxxx.....",
      "......xxxxxxxx........",
      "xxx..........xxxxxxxxxxxxxxx",
      "......xxxx....xxxx........",
      "xxx..........xxxxxxxx....xxxxxxx",
      "x.x.x.x.x.x.x.x.x.x.x.x.",
      ".x.x.x.x.x.x.x.x.x.x.x.x",
      ".x.x.x.x.x.x.x.x.x.x.x.x.",
      "x.x.x.x.x.x.x.x.x.x.x.x.x",
  };
  typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;

  for (auto TestCase : TestCases) {
    StringRangeRecorder Recorder;
    RangeTracker Tracker(Recorder);
    for (scudo::uptr I = 0; TestCase[I] != 0; I++)
      Tracker.processNextPage(TestCase[I] == 'x');
    Tracker.finish();

    // Strip trailing '.'-pages before comparing the results, as they are not
    // going to be reported to the range recorder anyway.
    const char *LastX = strrchr(TestCase, 'x');
    std::string Expected(
        TestCase,
        LastX == nullptr ? 0U : static_cast<size_t>(LastX - TestCase + 1));
    EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
  }
}

class ReleasedPagesRecorder {
public:
  ReleasedPagesRecorder() = default;
  explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
  std::set<scudo::uptr> ReportedPages;

  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
    const scudo::uptr PageSize = scudo::getPageSizeCached();
    for (scudo::uptr I = From; I < To; I += PageSize)
      ReportedPages.insert(I + getBase());
  }

  scudo::uptr getBase() const { return Base; }
  scudo::uptr Base = 0;
};

// Simplified version of a TransferBatch.
template <class SizeClassMap> struct FreeBatch {
  static const scudo::u16 MaxCount = SizeClassMap::MaxNumCachedHint;
  void clear() { Count = 0; }
  void add(scudo::uptr P) {
    DCHECK_LT(Count, MaxCount);
    Batch[Count++] = P;
  }
  scudo::u16 getCount() const { return Count; }
  scudo::uptr get(scudo::u16 I) const { return Batch[I]; }
  // Intrusive link used by scudo::SinglyLinkedList.
  FreeBatch *Next;

private:
  scudo::u16 Count;
  scudo::uptr Batch[MaxCount];
};
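
// The helpers below chunk a free array into batches of at most MaxCount
// blocks. For example (illustrative numbers only): if MaxCount were 14, a
// free array of 20 blocks would end up as two FreeBatch nodes on the list,
// holding 14 and 6 blocks respectively.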

template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PagesCount = 1024;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  const scudo::uptr PageSizeLog = scudo::getLog2(PageSize);
  std::mt19937 R;
  scudo::u32 RandState = 42;

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr MaxBlocks = PagesCount * PageSize / BlockSize;

    // Generate the random free list.
    std::vector<scudo::uptr> FreeArray;
    bool InFreeRange = false;
    scudo::uptr CurrentRangeEnd = 0;
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      if (I == CurrentRangeEnd) {
        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
      }
      if (InFreeRange)
        FreeArray.push_back(I * BlockSize);
    }
    if (FreeArray.empty())
      continue;
    // Shuffle the array to ensure that the order is irrelevant.
    std::shuffle(FreeArray.begin(), FreeArray.end(), R);

    // Build the FreeList from the FreeArray.
    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();
    Batch *CurrentBatch = nullptr;
    for (auto const &Block : FreeArray) {
      if (!CurrentBatch) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
      if (CurrentBatch->getCount() == Batch::MaxCount)
        CurrentBatch = nullptr;
    }

    // Release the memory.
    auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
    auto DecompactPtr = [](scudo::uptr P) { return P; };
    ReleasedPagesRecorder Recorder;
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/MaxBlocks * BlockSize);
    ASSERT_FALSE(Context.hasBlockMarked());
    Context.markFreeBlocksInRegion(FreeList, DecompactPtr, Recorder.getBase(),
                                   /*RegionIndex=*/0, MaxBlocks * BlockSize,
                                   /*MayContainLastBlockInRegion=*/true);
    ASSERT_TRUE(Context.hasBlockMarked());
    releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
    scudo::RegionPageMap &PageMap = Context.PageMap;

    // Verify that no released page is touched by a used chunk, and that all
    // ranges of free chunks big enough to contain entire memory pages had
    // those pages released.
    scudo::uptr VerifiedReleasedPages = 0;
    std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());

    scudo::uptr CurrentBlock = 0;
    InFreeRange = false;
    scudo::uptr CurrentFreeRangeStart = 0;
    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
      const bool IsFreeBlock =
          FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
      if (IsFreeBlock) {
        if (!InFreeRange) {
          InFreeRange = true;
          CurrentFreeRangeStart = CurrentBlock;
        }
      } else {
        // Verify that this used chunk does not touch any released page.
        const scudo::uptr StartPage = CurrentBlock / PageSize;
        const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
        for (scudo::uptr J = StartPage; J <= EndPage; J++) {
          const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
                                    Recorder.ReportedPages.end();
          EXPECT_EQ(false, PageReleased);
          EXPECT_EQ(false,
                    PageMap.isAllCounted(0, (J * PageSize) >> PageSizeLog));
        }

        if (InFreeRange) {
          InFreeRange = false;
          // Verify that all entire memory pages covered by this range of free
          // chunks were released.
          scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
          while (P + PageSize <= CurrentBlock) {
            const bool PageReleased =
                Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
            EXPECT_EQ(true, PageReleased);
            EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
            VerifiedReleasedPages++;
            P += PageSize;
          }
        }
      }

      CurrentBlock += BlockSize;
    }

    if (InFreeRange) {
      scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
      const scudo::uptr EndPage =
          scudo::roundUp(MaxBlocks * BlockSize, PageSize);
      while (P + PageSize <= EndPage) {
        const bool PageReleased =
            Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
        EXPECT_EQ(true, PageReleased);
        EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
        VerifiedReleasedPages++;
        P += PageSize;
      }
    }

    EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  }
}
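
// Worked example of the invariant testReleaseFreeMemoryToOS() checks
// (illustrative numbers only): with 4096-byte pages, a run of free blocks
// covering bytes [100, 8300) can only have the fully-covered page
// [4096, 8192) released; the partially-covered pages [0, 4096) and
// [8192, 12288) must stay resident because live blocks share them.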

template <class SizeClassMap> void testPageMapMarkRange() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);

    const scudo::uptr GroupNum = 2;
    const scudo::uptr GroupSize = scudo::roundUp(BlockSize, PageSize) * 2;
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(GroupSize * GroupNum, BlockSize);
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

    // Count, for each page, how many blocks touch it.
    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = 0; Block < RoundedRegionSize;
         Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    for (scudo::uptr GroupId = 0; GroupId < GroupNum; ++GroupId) {
      const scudo::uptr GroupBeg = GroupId * GroupSize;
      const scudo::uptr GroupEnd = GroupBeg + GroupSize;

      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize);
      Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);

      scudo::uptr FirstBlock =
          ((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;

      // All the pages before the first block page are not supposed to be
      // marked.
      if (FirstBlock / PageSize > 0) {
        for (scudo::uptr Page = 0; Page <= FirstBlock / PageSize - 1; ++Page)
          EXPECT_EQ(Context.PageMap.get(/*Region=*/0, Page), 0U);
      }

      // Verify the pages used by the blocks in the group, except that if the
      // end of the last block is not aligned with `GroupEnd`, it'll be
      // verified later.
      scudo::uptr Block;
      for (Block = FirstBlock; Block + BlockSize <= GroupEnd;
           Block += BlockSize) {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          // The first used page in the group has two cases: with and without
          // a block sitting across the boundary.
          if (Page == FirstBlock / PageSize) {
            if (FirstBlock % PageSize == 0) {
              EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0U, Page));
            } else {
              // There's a block straddling `GroupBeg`; it's supposed to only
              // increment the counter, and we expect it to be 1 less
              // (excluding the straddling one) than the total blocks on the
              // page.
              EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page),
                        Pages[Page] - 1);
            }
          } else {
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          }
        }
      }

      if (Block == GroupEnd)
        continue;

      // Examine the last block, which sits across the group boundary.
      if (Block + BlockSize == RegionSize) {
        // This is the last block in the region; it's supposed to mark all the
        // pages as all counted.
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
        }
      } else {
        for (scudo::uptr Page = Block / PageSize;
             Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
          if (Page <= (GroupEnd - 1) / PageSize)
            EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
          else
            EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 1U);
        }
      }

      const scudo::uptr FirstUncountedPage =
          scudo::roundUp(Block + BlockSize, PageSize);
      for (scudo::uptr Page = FirstUncountedPage;
           Page <= RoundedRegionSize / PageSize; ++Page) {
        EXPECT_EQ(Context.PageMap.get(/*Region=*/0U, Page), 0U);
      }
    } // Iterate each Group

    // Release the entire region. This is to ensure the last page is counted.
    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/RegionSize);
    Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0,
                                  /*RegionIndex=*/0, RegionSize);
    for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
      EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
  } // Iterate each size class
}
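
// Worked example for the straddling-block case above (illustrative numbers
// only): with 4096-byte pages and a hypothetical BlockSize of 4112, GroupSize
// is roundUp(4112, 4096) * 2 == 16384, so the second group begins at
// GroupBeg == 16384 while its first whole block begins at
// roundUpSlow(16384, 4112) == 16448. The block at [12336, 16448) straddles
// GroupBeg, so the page at offset 16384 only gets its counter incremented
// rather than being marked as all-counted.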

template <class SizeClassMap> void testReleasePartialRegion() {
  typedef FreeBatch<SizeClassMap> Batch;
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    // In the following, we want to ensure the region includes at least 2 pages
    // and we will release all the pages except the first one. The handling of
    // the last block is tricky, so we always test the case that includes the
    // last block.
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
    const scudo::uptr RegionSize =
        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase,
                           BlockSize) +
        BlockSize;
    const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);

    scudo::SinglyLinkedList<Batch> FreeList;
    FreeList.clear();

    // Skip the blocks in the first page and add the remaining.
    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    // This follows the logic of how we count the last page. It should be
    // consistent with how markFreeBlocksInRegion() handles the last block.
    if (RoundedRegionSize % BlockSize != 0)
      ++Pages.back();

    Batch *CurrentBatch = nullptr;
    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
         Block < RegionSize; Block += BlockSize) {
      if (CurrentBatch == nullptr ||
          CurrentBatch->getCount() == Batch::MaxCount) {
        CurrentBatch = new Batch;
        CurrentBatch->clear();
        FreeList.push_back(CurrentBatch);
      }
      CurrentBatch->add(Block);
    }

    auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
      auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
      ReleasedPagesRecorder Recorder(ReleaseBase);
      releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
      const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize);

      for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
        if (P < FirstBlock) {
          // If FirstBlock is not aligned with a page boundary, the first
          // touched page will not be released either.
          EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
                      Recorder.ReportedPages.end());
        } else {
          EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
                      Recorder.ReportedPages.end());
        }
      }
    };

    // Test marking by visiting each block.
    {
      auto DecompactPtr = [](scudo::uptr P) { return P; };
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markFreeBlocksInRegion(FreeList, DecompactPtr, /*Base=*/0U,
                                     /*RegionIndex=*/0, RegionSize,
                                     /*MayContainLastBlockInRegion=*/true);
      for (const Batch &It : FreeList) {
        for (scudo::u16 I = 0; I < It.getCount(); I++) {
          scudo::uptr Block = It.get(I);
          for (scudo::uptr Page = Block / PageSize;
               Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
            EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
                                                       Page - BasePageOffset));
          }
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Test range marking.
    {
      scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U,
                                    /*RegionIndex=*/0, RegionSize);
      for (scudo::uptr Page = ReleaseBase / PageSize;
           Page < RoundedRegionSize / PageSize; ++Page) {
        if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
            Pages[Page]) {
          EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
                                                   Page - BasePageOffset));
        }
      }

      VerifyReleaseToOs(Context);
    }

    // Check the buffer size of the PageMap.
    {
      scudo::PageReleaseContext Full(BlockSize, /*NumberOfRegions=*/1U,
                                     /*ReleaseSize=*/RegionSize);
      Full.ensurePageMapAllocated();
      scudo::PageReleaseContext Partial(BlockSize, /*NumberOfRegions=*/1U,
                                        /*ReleaseSize=*/RegionSize - PageSize,
                                        ReleaseBase);
      Partial.ensurePageMapAllocated();

      EXPECT_GE(Full.PageMap.getBufferNumElements(),
                Partial.PageMap.getBufferNumElements());
    }

    while (!FreeList.empty()) {
      CurrentBatch = FreeList.front();
      FreeList.pop_front();
      delete CurrentBatch;
    }
  } // Iterate each size class
}
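
// Worked example for the ReleaseBase handling above (illustrative numbers
// only): with 4096-byte pages and a hypothetical BlockSize of 96,
// ReleaseBase = roundUp(96, 4096) = 4096 and the first free block starts at
// roundUpSlow(4096, 96) = 4128. Since 4128 is not page aligned, the page at
// offset 4096 is partly owned by a block that is never freed, so
// VerifyReleaseToOs expects every page below the first free block to stay
// unreported.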

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
  testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
  testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
}

TEST(ScudoReleaseTest, PageMapMarkRange) {
  testPageMapMarkRange<scudo::DefaultSizeClassMap>();
  testPageMapMarkRange<scudo::AndroidSizeClassMap>();
  testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
}

TEST(ScudoReleaseTest, ReleasePartialRegion) {
  testReleasePartialRegion<scudo::DefaultSizeClassMap>();
  testReleasePartialRegion<scudo::AndroidSizeClassMap>();
  testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
}

template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
  const scudo::uptr PageSize = scudo::getPageSizeCached();

  // We want to test that a memory group which contains only a single block is
  // handled properly. The case looks like:
  //
  //      From                    To
  //        +---------------------+
  //   +------------+------------+
  //   |            |            |
  //   +------------+------------+
  //
  // Note that `From` will be page aligned.
  //
  // If the second-from-last block is aligned at `From`, then we expect all
  // the pages after `From` to be marked as can-be-released. Otherwise, only
  // the pages touched by the last block will be marked as can-be-released.
  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
    const scudo::uptr From = scudo::roundUp(BlockSize, PageSize);
    const scudo::uptr To =
        From % BlockSize == 0
            ? From + BlockSize
            : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize;
    const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize);

    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
    for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize;
         Block += BlockSize) {
      for (scudo::uptr Page = Block / PageSize;
           Page <= (Block + BlockSize - 1) / PageSize &&
           Page < RoundedRegionSize / PageSize;
           ++Page) {
        ASSERT_LT(Page, Pages.size());
        ++Pages[Page];
      }
    }

    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
                                      /*ReleaseSize=*/To);
    Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0,
                                  /*RegionSize=*/To);

    for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) {
      if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) !=
          Pages[Page / PageSize]) {
        EXPECT_TRUE(
            Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize));
      }
    }
  } // for each size class
}
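
// Worked example of the From/To computation above (illustrative numbers
// only): with 4096-byte pages and a hypothetical BlockSize of 48,
// From = roundUp(48, 4096) = 4096. Since 4096 % 48 != 0, To becomes
// roundDownSlow(4096 + 48, 48) + 48 = 4128 + 48 = 4176, so the range
// [4096, 4176) wholly contains only the single block [4128, 4176) while the
// block [4080, 4128) straddles `From`.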

TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
  testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
  testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
}

TEST(ScudoReleaseTest, BufferPool) {
  constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
  constexpr scudo::uptr StaticBufferNumElements = 512U;

  // Allocate the buffer pool on the heap because it is quite large (slightly
  // more than StaticBufferCount * StaticBufferNumElements * sizeof(uptr)) and
  // it may not fit on the stack on some platforms.
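  // For scale (illustration only): on a 64-bit target, SCUDO_WORDSIZE is 64,
  // so that is roughly 63 * 512 * 8 bytes, i.e. about 252 KiB of backing
  // storage for the static buffers.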
  using BufferPool =
      scudo::BufferPool<StaticBufferCount, StaticBufferNumElements>;
  std::unique_ptr<BufferPool> Pool(new BufferPool());

  std::vector<BufferPool::Buffer> Buffers;
  for (scudo::uptr I = 0; I < StaticBufferCount; ++I) {
    BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
    EXPECT_TRUE(Pool->isStaticBufferTestOnly(Buffer));
    Buffers.push_back(Buffer);
  }

  // The static buffer is supposed to be used up.
  BufferPool::Buffer Buffer = Pool->getBuffer(StaticBufferNumElements);
  EXPECT_FALSE(Pool->isStaticBufferTestOnly(Buffer));

  Pool->releaseBuffer(Buffer);
  for (auto &Buffer : Buffers)
    Pool->releaseBuffer(Buffer);
}