1 //===-- secondary_test.cpp --------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "secondary.h"

#include <algorithm>
#include <condition_variable>
#include <cstring>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include <vector>
24 template <typename Config
> static scudo::Options
getOptionsForConfig() {
25 if (!Config::MaySupportMemoryTagging
|| !scudo::archSupportsMemoryTagging() ||
26 !scudo::systemSupportsMemoryTagging())
28 scudo::AtomicOptions AO
;
29 AO
.set(scudo::OptionBit::UseMemoryTagging
);
33 template <typename Config
> static void testSecondaryBasic(void) {
34 using SecondaryT
= scudo::MapAllocator
<Config
>;
35 scudo::Options Options
= getOptionsForConfig
<Config
>();
39 std::unique_ptr
<SecondaryT
> L(new SecondaryT
);
41 const scudo::uptr Size
= 1U << 16;
42 void *P
= L
->allocate(Options
, Size
);
43 EXPECT_NE(P
, nullptr);
45 EXPECT_GE(SecondaryT::getBlockSize(P
), Size
);
46 L
->deallocate(Options
, P
);
48 // If the Secondary can't cache that pointer, it will be unmapped.
49 if (!L
->canCache(Size
)) {
52 // Repeat few time to avoid missing crash if it's mmaped by unrelated
54 for (int i
= 0; i
< 10; ++i
) {
55 P
= L
->allocate(Options
, Size
);
56 L
->deallocate(Options
, P
);
63 const scudo::uptr Align
= 1U << 16;
64 P
= L
->allocate(Options
, Size
+ Align
, Align
);
65 EXPECT_NE(P
, nullptr);
66 void *AlignedP
= reinterpret_cast<void *>(
67 scudo::roundUp(reinterpret_cast<scudo::uptr
>(P
), Align
));
68 memset(AlignedP
, 'A', Size
);
69 L
->deallocate(Options
, P
);
71 std::vector
<void *> V
;
72 for (scudo::uptr I
= 0; I
< 32U; I
++)
73 V
.push_back(L
->allocate(Options
, Size
));
74 std::shuffle(V
.begin(), V
.end(), std::mt19937(std::random_device()()));
76 L
->deallocate(Options
, V
.back());
79 scudo::ScopedString Str
;
85 struct NoCacheConfig
{
86 static const bool MaySupportMemoryTagging
= false;
88 template <typename Config
>
89 using CacheT
= scudo::MapAllocatorNoCache
<Config
>;
94 static const bool MaySupportMemoryTagging
= false;
97 static const scudo::u32 EntriesArraySize
= 128U;
98 static const scudo::u32 QuarantineSize
= 0U;
99 static const scudo::u32 DefaultMaxEntriesCount
= 64U;
100 static const scudo::uptr DefaultMaxEntrySize
= 1UL << 20;
101 static const scudo::s32 MinReleaseToOsIntervalMs
= INT32_MIN
;
102 static const scudo::s32 MaxReleaseToOsIntervalMs
= INT32_MAX
;
105 template <typename Config
> using CacheT
= scudo::MapAllocatorCache
<Config
>;
109 TEST(ScudoSecondaryTest
, SecondaryBasic
) {
110 testSecondaryBasic
<NoCacheConfig
>();
111 testSecondaryBasic
<scudo::DefaultConfig
>();
112 testSecondaryBasic
<TestConfig
>();
115 struct MapAllocatorTest
: public Test
{
116 using Config
= scudo::DefaultConfig
;
117 using LargeAllocator
= scudo::MapAllocator
<Config
>;
119 void SetUp() override
{ Allocator
->init(nullptr); }
121 void TearDown() override
{ Allocator
->unmapTestOnly(); }
123 std::unique_ptr
<LargeAllocator
> Allocator
=
124 std::make_unique
<LargeAllocator
>();
125 scudo::Options Options
= getOptionsForConfig
<Config
>();
128 // This exercises a variety of combinations of size and alignment for the
129 // MapAllocator. The size computation done here mimic the ones done by the
130 // combined allocator.
131 TEST_F(MapAllocatorTest
, SecondaryCombinations
) {
132 constexpr scudo::uptr MinAlign
= FIRST_32_SECOND_64(8, 16);
133 constexpr scudo::uptr HeaderSize
= scudo::roundUp(8, MinAlign
);
134 for (scudo::uptr SizeLog
= 0; SizeLog
<= 20; SizeLog
++) {
135 for (scudo::uptr AlignLog
= FIRST_32_SECOND_64(3, 4); AlignLog
<= 16;
137 const scudo::uptr Align
= 1U << AlignLog
;
138 for (scudo::sptr Delta
= -128; Delta
<= 128; Delta
+= 8) {
139 if ((1LL << SizeLog
) + Delta
<= 0)
141 const scudo::uptr UserSize
= scudo::roundUp(
142 static_cast<scudo::uptr
>((1LL << SizeLog
) + Delta
), MinAlign
);
143 const scudo::uptr Size
=
144 HeaderSize
+ UserSize
+ (Align
> MinAlign
? Align
- HeaderSize
: 0);
145 void *P
= Allocator
->allocate(Options
, Size
, Align
);
146 EXPECT_NE(P
, nullptr);
147 void *AlignedP
= reinterpret_cast<void *>(
148 scudo::roundUp(reinterpret_cast<scudo::uptr
>(P
), Align
));
149 memset(AlignedP
, 0xff, UserSize
);
150 Allocator
->deallocate(Options
, P
);
154 scudo::ScopedString Str
;
155 Allocator
->getStats(&Str
);
159 TEST_F(MapAllocatorTest
, SecondaryIterate
) {
160 std::vector
<void *> V
;
161 const scudo::uptr PageSize
= scudo::getPageSizeCached();
162 for (scudo::uptr I
= 0; I
< 32U; I
++)
163 V
.push_back(Allocator
->allocate(
164 Options
, (static_cast<scudo::uptr
>(std::rand()) % 16U) * PageSize
));
165 auto Lambda
= [&V
](scudo::uptr Block
) {
166 EXPECT_NE(std::find(V
.begin(), V
.end(), reinterpret_cast<void *>(Block
)),
169 Allocator
->disable();
170 Allocator
->iterateOverBlocks(Lambda
);
173 Allocator
->deallocate(Options
, V
.back());
176 scudo::ScopedString Str
;
177 Allocator
->getStats(&Str
);
181 TEST_F(MapAllocatorTest
, SecondaryOptions
) {
182 // Attempt to set a maximum number of entries higher than the array size.
184 Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 4096U));
185 // A negative number will be cast to a scudo::u32, and fail.
186 EXPECT_FALSE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, -1));
187 if (Allocator
->canCache(0U)) {
188 // Various valid combinations.
189 EXPECT_TRUE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 4U));
191 Allocator
->setOption(scudo::Option::MaxCacheEntrySize
, 1UL << 20));
192 EXPECT_TRUE(Allocator
->canCache(1UL << 18));
194 Allocator
->setOption(scudo::Option::MaxCacheEntrySize
, 1UL << 17));
195 EXPECT_FALSE(Allocator
->canCache(1UL << 18));
196 EXPECT_TRUE(Allocator
->canCache(1UL << 16));
197 EXPECT_TRUE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 0U));
198 EXPECT_FALSE(Allocator
->canCache(1UL << 16));
199 EXPECT_TRUE(Allocator
->setOption(scudo::Option::MaxCacheEntriesCount
, 4U));
201 Allocator
->setOption(scudo::Option::MaxCacheEntrySize
, 1UL << 20));
202 EXPECT_TRUE(Allocator
->canCache(1UL << 16));
206 struct MapAllocatorWithReleaseTest
: public MapAllocatorTest
{
207 void SetUp() override
{ Allocator
->init(nullptr, /*ReleaseToOsInterval=*/0); }
209 void performAllocations() {
210 std::vector
<void *> V
;
211 const scudo::uptr PageSize
= scudo::getPageSizeCached();
213 std::unique_lock
<std::mutex
> Lock(Mutex
);
217 for (scudo::uptr I
= 0; I
< 128U; I
++) {
218 // Deallocate 75% of the blocks.
219 const bool Deallocate
= (std::rand() & 3) != 0;
220 void *P
= Allocator
->allocate(
221 Options
, (static_cast<scudo::uptr
>(std::rand()) % 16U) * PageSize
);
223 Allocator
->deallocate(Options
, P
);
228 Allocator
->deallocate(Options
, V
.back());
234 std::condition_variable Cv
;
238 TEST_F(MapAllocatorWithReleaseTest
, SecondaryThreadsRace
) {
239 std::thread Threads
[16];
240 for (scudo::uptr I
= 0; I
< ARRAY_SIZE(Threads
); I
++)
242 std::thread(&MapAllocatorWithReleaseTest::performAllocations
, this);
244 std::unique_lock
<std::mutex
> Lock(Mutex
);
248 for (auto &T
: Threads
)
250 scudo::ScopedString Str
;
251 Allocator
->getStats(&Str
);