//===-- secondary_test.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "allocator_config_wrapper.h"
#include "secondary.h"

#include <algorithm>
#include <condition_variable>
#include <cstring>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include <vector>
25 template <typename Config
> static scudo::Options
getOptionsForConfig() {
26 if (!Config::getMaySupportMemoryTagging() ||
27 !scudo::archSupportsMemoryTagging() ||
28 !scudo::systemSupportsMemoryTagging())
30 scudo::AtomicOptions AO
;
31 AO
.set(scudo::OptionBit::UseMemoryTagging
);
35 template <typename Config
> static void testSecondaryBasic(void) {
36 using SecondaryT
= scudo::MapAllocator
<scudo::SecondaryConfig
<Config
>>;
37 scudo::Options Options
=
38 getOptionsForConfig
<scudo::SecondaryConfig
<Config
>>();
42 std::unique_ptr
<SecondaryT
> L(new SecondaryT
);
44 const scudo::uptr Size
= 1U << 16;
45 void *P
= L
->allocate(Options
, Size
);
46 EXPECT_NE(P
, nullptr);
48 EXPECT_GE(SecondaryT::getBlockSize(P
), Size
);
49 L
->deallocate(Options
, P
);
51 // If the Secondary can't cache that pointer, it will be unmapped.
52 if (!L
->canCache(Size
)) {
55 // Repeat few time to avoid missing crash if it's mmaped by unrelated
57 for (int i
= 0; i
< 10; ++i
) {
58 P
= L
->allocate(Options
, Size
);
59 L
->deallocate(Options
, P
);
66 const scudo::uptr Align
= 1U << 16;
67 P
= L
->allocate(Options
, Size
+ Align
, Align
);
68 EXPECT_NE(P
, nullptr);
69 void *AlignedP
= reinterpret_cast<void *>(
70 scudo::roundUp(reinterpret_cast<scudo::uptr
>(P
), Align
));
71 memset(AlignedP
, 'A', Size
);
72 L
->deallocate(Options
, P
);
74 std::vector
<void *> V
;
75 for (scudo::uptr I
= 0; I
< 32U; I
++)
76 V
.push_back(L
->allocate(Options
, Size
));
77 std::shuffle(V
.begin(), V
.end(), std::mt19937(std::random_device()()));
79 L
->deallocate(Options
, V
.back());
82 scudo::ScopedString Str
;
88 struct NoCacheConfig
{
89 static const bool MaySupportMemoryTagging
= false;
90 template <typename
> using TSDRegistryT
= void;
91 template <typename
> using PrimaryT
= void;
92 template <typename Config
> using SecondaryT
= scudo::MapAllocator
<Config
>;
95 template <typename Config
>
96 using CacheT
= scudo::MapAllocatorNoCache
<Config
>;
101 static const bool MaySupportMemoryTagging
= false;
102 template <typename
> using TSDRegistryT
= void;
103 template <typename
> using PrimaryT
= void;
104 template <typename
> using SecondaryT
= void;
108 static const scudo::u32 EntriesArraySize
= 128U;
109 static const scudo::u32 QuarantineSize
= 0U;
110 static const scudo::u32 DefaultMaxEntriesCount
= 64U;
111 static const scudo::uptr DefaultMaxEntrySize
= 1UL << 20;
112 static const scudo::s32 MinReleaseToOsIntervalMs
= INT32_MIN
;
113 static const scudo::s32 MaxReleaseToOsIntervalMs
= INT32_MAX
;
116 template <typename Config
> using CacheT
= scudo::MapAllocatorCache
<Config
>;
120 TEST(ScudoSecondaryTest
, SecondaryBasic
) {
121 testSecondaryBasic
<NoCacheConfig
>();
122 testSecondaryBasic
<scudo::DefaultConfig
>();
123 testSecondaryBasic
<TestConfig
>();
126 struct MapAllocatorTest
: public Test
{
127 using Config
= scudo::DefaultConfig
;
128 using LargeAllocator
= scudo::MapAllocator
<scudo::SecondaryConfig
<Config
>>;
130 void SetUp() override
{ Allocator
->init(nullptr); }
132 void TearDown() override
{ Allocator
->unmapTestOnly(); }
134 std::unique_ptr
<LargeAllocator
> Allocator
=
135 std::make_unique
<LargeAllocator
>();
136 scudo::Options Options
=
137 getOptionsForConfig
<scudo::SecondaryConfig
<Config
>>();
140 // This exercises a variety of combinations of size and alignment for the
141 // MapAllocator. The size computation done here mimic the ones done by the
142 // combined allocator.
143 TEST_F(MapAllocatorTest
, SecondaryCombinations
) {
144 constexpr scudo::uptr MinAlign
= FIRST_32_SECOND_64(8, 16);
145 constexpr scudo::uptr HeaderSize
= scudo::roundUp(8, MinAlign
);
146 for (scudo::uptr SizeLog
= 0; SizeLog
<= 20; SizeLog
++) {
147 for (scudo::uptr AlignLog
= FIRST_32_SECOND_64(3, 4); AlignLog
<= 16;
149 const scudo::uptr Align
= 1U << AlignLog
;
150 for (scudo::sptr Delta
= -128; Delta
<= 128; Delta
+= 8) {
151 if ((1LL << SizeLog
) + Delta
<= 0)
153 const scudo::uptr UserSize
= scudo::roundUp(
154 static_cast<scudo::uptr
>((1LL << SizeLog
) + Delta
), MinAlign
);
155 const scudo::uptr Size
=
156 HeaderSize
+ UserSize
+ (Align
> MinAlign
? Align
- HeaderSize
: 0);
157 void *P
= Allocator
->allocate(Options
, Size
, Align
);
158 EXPECT_NE(P
, nullptr);
159 void *AlignedP
= reinterpret_cast<void *>(
160 scudo::roundUp(reinterpret_cast<scudo::uptr
>(P
), Align
));
161 memset(AlignedP
, 0xff, UserSize
);
162 Allocator
->deallocate(Options
, P
);
166 scudo::ScopedString Str
;
167 Allocator
->getStats(&Str
);
171 TEST_F(MapAllocatorTest
, SecondaryIterate
) {
172 std::vector
<void *> V
;
173 const scudo::uptr PageSize
= scudo::getPageSizeCached();
174 for (scudo::uptr I
= 0; I
< 32U; I
++)
175 V
.push_back(Allocator
->allocate(
176 Options
, (static_cast<scudo::uptr
>(std::rand()) % 16U) * PageSize
));
177 auto Lambda
= [&V
](scudo::uptr Block
) {
178 EXPECT_NE(std::find(V
.begin(), V
.end(), reinterpret_cast<void *>(Block
)),
181 Allocator
->disable();
182 Allocator
->iterateOverBlocks(Lambda
);
185 Allocator
->deallocate(Options
, V
.back());
188 scudo::ScopedString Str
;
189 Allocator
->getStats(&Str
);
193 TEST_F(MapAllocatorTest
, SecondaryCacheOptions
) {
194 if (!Allocator
->canCache(0U))
195 TEST_SKIP("Secondary Cache disabled");
197 // Attempt to set a maximum number of entries higher than the array size.
198 EXPECT_TRUE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 4096U));
200 // Attempt to set an invalid (negative) number of entries
201 EXPECT_FALSE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, -1));
203 // Various valid combinations.
204 EXPECT_TRUE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 4U));
206 Allocator
->setOption(scudo::Option::MaxCacheEntrySize
, 1UL << 20));
207 EXPECT_TRUE(Allocator
->canCache(1UL << 18));
209 Allocator
->setOption(scudo::Option::MaxCacheEntrySize
, 1UL << 17));
210 EXPECT_FALSE(Allocator
->canCache(1UL << 18));
211 EXPECT_TRUE(Allocator
->canCache(1UL << 16));
212 EXPECT_TRUE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 0U));
213 EXPECT_FALSE(Allocator
->canCache(1UL << 16));
214 EXPECT_TRUE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 4U));
216 Allocator
->setOption(scudo::Option::MaxCacheEntrySize
, 1UL << 20));
217 EXPECT_TRUE(Allocator
->canCache(1UL << 16));
220 struct MapAllocatorWithReleaseTest
: public MapAllocatorTest
{
221 void SetUp() override
{ Allocator
->init(nullptr, /*ReleaseToOsInterval=*/0); }
223 void performAllocations() {
224 std::vector
<void *> V
;
225 const scudo::uptr PageSize
= scudo::getPageSizeCached();
227 std::unique_lock
<std::mutex
> Lock(Mutex
);
231 for (scudo::uptr I
= 0; I
< 128U; I
++) {
232 // Deallocate 75% of the blocks.
233 const bool Deallocate
= (std::rand() & 3) != 0;
234 void *P
= Allocator
->allocate(
235 Options
, (static_cast<scudo::uptr
>(std::rand()) % 16U) * PageSize
);
237 Allocator
->deallocate(Options
, P
);
242 Allocator
->deallocate(Options
, V
.back());
248 std::condition_variable Cv
;
252 TEST_F(MapAllocatorWithReleaseTest
, SecondaryThreadsRace
) {
253 std::thread Threads
[16];
254 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
256 std::thread(&MapAllocatorWithReleaseTest::performAllocations
, this);
258 std::unique_lock
<std::mutex
> Lock(Mutex
);
262 for (auto &T
: Threads
)
264 scudo::ScopedString Str
;
265 Allocator
->getStats(&Str
);
269 struct MapAllocatorCacheTest
: public Test
{
270 static constexpr scudo::u32 UnmappedMarker
= 0xDEADBEEF;
272 static void testUnmapCallback(scudo::MemMapT
&MemMap
) {
273 scudo::u32
*Ptr
= reinterpret_cast<scudo::u32
*>(MemMap
.getBase());
274 *Ptr
= UnmappedMarker
;
277 using SecondaryConfig
= scudo::SecondaryConfig
<TestConfig
>;
278 using CacheConfig
= SecondaryConfig::CacheConfig
;
279 using CacheT
= scudo::MapAllocatorCache
<CacheConfig
, testUnmapCallback
>;
281 std::unique_ptr
<CacheT
> Cache
= std::make_unique
<CacheT
>();
283 const scudo::uptr PageSize
= scudo::getPageSizeCached();
284 // The current test allocation size is set to the maximum
286 static constexpr scudo::uptr TestAllocSize
=
287 CacheConfig::getDefaultMaxEntrySize();
289 scudo::Options Options
= getOptionsForConfig
<SecondaryConfig
>();
291 void SetUp() override
{ Cache
->init(/*ReleaseToOsInterval=*/-1); }
293 void TearDown() override
{ Cache
->unmapTestOnly(); }
295 scudo::MemMapT
allocate(scudo::uptr Size
) {
296 scudo::uptr MapSize
= scudo::roundUp(Size
, PageSize
);
297 scudo::ReservedMemoryT ReservedMemory
;
298 CHECK(ReservedMemory
.create(0U, MapSize
, nullptr, MAP_ALLOWNOMEM
));
300 scudo::MemMapT MemMap
= ReservedMemory
.dispatch(
301 ReservedMemory
.getBase(), ReservedMemory
.getCapacity());
302 MemMap
.remap(MemMap
.getBase(), MemMap
.getCapacity(), "scudo:test",
303 MAP_RESIZABLE
| MAP_ALLOWNOMEM
);
307 void fillCacheWithSameSizeBlocks(std::vector
<scudo::MemMapT
> &MemMaps
,
308 scudo::uptr NumEntries
, scudo::uptr Size
) {
309 for (scudo::uptr I
= 0; I
< NumEntries
; I
++) {
310 MemMaps
.emplace_back(allocate(Size
));
311 auto &MemMap
= MemMaps
[I
];
312 Cache
->store(Options
, MemMap
.getBase(), MemMap
.getCapacity(),
313 MemMap
.getBase(), MemMap
);
318 TEST_F(MapAllocatorCacheTest
, CacheOrder
) {
319 std::vector
<scudo::MemMapT
> MemMaps
;
320 Cache
->setOption(scudo::Option::MaxCacheEntriesCount
,
321 CacheConfig::getEntriesArraySize());
323 fillCacheWithSameSizeBlocks(MemMaps
, CacheConfig::getEntriesArraySize(),
326 // Retrieval order should be the inverse of insertion order
327 for (scudo::uptr I
= CacheConfig::getEntriesArraySize(); I
> 0; I
--) {
328 scudo::uptr EntryHeaderPos
;
329 scudo::CachedBlock Entry
=
330 Cache
->retrieve(0, TestAllocSize
, PageSize
, 0, EntryHeaderPos
);
331 EXPECT_EQ(Entry
.MemMap
.getBase(), MemMaps
[I
- 1].getBase());
335 for (auto &MemMap
: MemMaps
)
339 TEST_F(MapAllocatorCacheTest
, PartialChunkHeuristicRetrievalTest
) {
340 const scudo::uptr FragmentedPages
=
341 1 + scudo::CachedBlock::MaxReleasedCachePages
;
342 scudo::uptr EntryHeaderPos
;
343 scudo::CachedBlock Entry
;
344 scudo::MemMapT MemMap
= allocate(PageSize
+ FragmentedPages
* PageSize
);
345 Cache
->store(Options
, MemMap
.getBase(), MemMap
.getCapacity(),
346 MemMap
.getBase(), MemMap
);
348 // FragmentedPages > MaxAllowedFragmentedPages so PageSize
349 // cannot be retrieved from the cache
350 Entry
= Cache
->retrieve(/*MaxAllowedFragmentedPages=*/0, PageSize
, PageSize
,
352 EXPECT_FALSE(Entry
.isValid());
354 // FragmentedPages == MaxAllowedFragmentedPages so PageSize
355 // can be retrieved from the cache
357 Cache
->retrieve(FragmentedPages
, PageSize
, PageSize
, 0, EntryHeaderPos
);
358 EXPECT_TRUE(Entry
.isValid());
363 TEST_F(MapAllocatorCacheTest
, MemoryLeakTest
) {
364 std::vector
<scudo::MemMapT
> MemMaps
;
365 // Fill the cache above MaxEntriesCount to force an eviction
366 // The first cache entry should be evicted (because it is the oldest)
367 // due to the maximum number of entries being reached
368 fillCacheWithSameSizeBlocks(
369 MemMaps
, CacheConfig::getDefaultMaxEntriesCount() + 1, TestAllocSize
);
371 std::vector
<scudo::CachedBlock
> RetrievedEntries
;
373 // First MemMap should be evicted from cache because it was the first
374 // inserted into the cache
375 for (scudo::uptr I
= CacheConfig::getDefaultMaxEntriesCount(); I
> 0; I
--) {
376 scudo::uptr EntryHeaderPos
;
377 RetrievedEntries
.push_back(
378 Cache
->retrieve(0, TestAllocSize
, PageSize
, 0, EntryHeaderPos
));
379 EXPECT_EQ(MemMaps
[I
].getBase(), RetrievedEntries
.back().MemMap
.getBase());
382 // Evicted entry should be marked due to unmap callback
383 EXPECT_EQ(*reinterpret_cast<scudo::u32
*>(MemMaps
[0].getBase()),
387 for (auto &MemMap
: MemMaps
)