//===-- primary_test.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "condition_variable.h"
#include "primary32.h"
#include "primary64.h"
#include "size_class_map.h"

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <random>
#include <stdlib.h>
#include <thread>
#include <vector>
// Note that with small enough regions, the SizeClassAllocator64 also works on
// 32-bit architectures. It's not something we want to encourage, but we still
// should ensure the tests pass.
template <typename SizeClassMapT> struct TestConfig1 {
  static const bool MaySupportMemoryTagging = false;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};
template <typename SizeClassMapT> struct TestConfig2 {
  static const bool MaySupportMemoryTagging = false;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};
template <typename SizeClassMapT> struct TestConfig3 {
  static const bool MaySupportMemoryTagging = true;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::uptr GroupSizeLog = 20U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};
template <typename SizeClassMapT> struct TestConfig4 {
  static const bool MaySupportMemoryTagging = true;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = 3U;
    static const scudo::uptr GroupSizeLog = 20U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
  };
};
// This is the only test config that enables the condition variable.
template <typename SizeClassMapT> struct TestConfig5 {
  static const bool MaySupportMemoryTagging = true;

  struct Primary {
    using SizeClassMap = SizeClassMapT;
#if defined(__mips__)
    // Unable to allocate greater size on QEMU-user.
    static const scudo::uptr RegionSizeLog = 23U;
#else
    static const scudo::uptr RegionSizeLog = 24U;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::u32 CompactPtrT;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const bool UseConditionVariable = true;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
};
template <template <typename> class BaseConfig, typename SizeClassMapT>
struct Config : public BaseConfig<SizeClassMapT> {};

template <template <typename> class BaseConfig, typename SizeClassMapT>
struct SizeClassAllocator
    : public scudo::SizeClassAllocator64<Config<BaseConfig, SizeClassMapT>> {};
template <typename SizeClassMapT>
struct SizeClassAllocator<TestConfig1, SizeClassMapT>
    : public scudo::SizeClassAllocator32<Config<TestConfig1, SizeClassMapT>> {};
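
// TestAllocator owns the primary under test: its destructor verifies that the
// tests returned every block before unmapping, and operator new goes through
// posix_memalign because the allocator type may require a stricter alignment
// than the default operator new provides.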
template <template <typename> class BaseConfig, typename SizeClassMapT>
struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
  ~TestAllocator() {
    this->verifyAllBlocksAreReleasedTestOnly();
    this->unmapTestOnly();
  }

  void *operator new(size_t size) {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, alignof(TestAllocator), size));
    return p;
  }

  void operator delete(void *ptr) { free(ptr); }
};
template <template <typename> class BaseConfig>
struct ScudoPrimaryTest : public Test {};
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig1)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig2)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig3)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig4)                            \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConfig5)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<TYPE>;                          \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <template <typename> class TypeParam>                               \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <template <typename> class TypeParam>                               \
  void FIXTURE##NAME<TypeParam>::Run()
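
// Each SCUDO_TYPED_TEST(FIXTURE, NAME) below behaves like a typed test: it
// declares FIXTURE##NAME<TypeParam> with a Run() method, instantiates one
// TEST_F per config through SCUDO_TYPED_TEST_ALL_TYPES, and the braces that
// follow the macro invocation become the body of
// FIXTURE##NAME<TypeParam>::Run().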
SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr NumberOfAllocations = 32U;
  for (scudo::uptr I = 0; I <= 16U; I++) {
    const scudo::uptr Size = 1UL << I;
    if (!Primary::canAllocate(Size))
      continue;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *Pointers[NumberOfAllocations];
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
      void *P = Cache.allocate(ClassId);
      memset(P, 'B', Size);
      Pointers[J] = P;
    }
    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
      Cache.deallocate(ClassId, Pointers[J]);
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}
struct SmallRegionsConfig {
  static const bool MaySupportMemoryTagging = false;

  struct Primary {
    using SizeClassMap = scudo::DefaultSizeClassMap;
    static const scudo::uptr RegionSizeLog = 21U;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 20U;
  };
};
// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
// For the 32-bit one, it requires actually exhausting memory, so we skip it.
TEST(ScudoPrimaryTest, Primary64OOM) {
  using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
  using TransferBatch = Primary::TransferBatchT;
  Primary Allocator;
  Allocator.init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  scudo::GlobalStats Stats;
  Stats.init();
  Cache.init(&Stats, &Allocator);
  bool AllocationFailed = false;
  std::vector<TransferBatch *> Batches;
  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
  typename Primary::CacheT::CompactPtrT Blocks[TransferBatch::MaxNumCached];
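
  // Keep popping batches of the largest size class until the small regions are
  // exhausted; popBatch() returning null is the OOM condition this test is
  // looking for.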
  for (scudo::uptr I = 0; I < 10000U; I++) {
    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
    if (!B) {
      AllocationFailed = true;
      break;
    }
    for (scudo::u16 J = 0; J < B->getCount(); J++)
      memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
    Batches.push_back(B);
  }
  while (!Batches.empty()) {
    TransferBatch *B = Batches.back();
    Batches.pop_back();
    const scudo::u16 Count = B->getCount();
    B->moveToArray(Blocks);
    Allocator.pushBlocks(&Cache, ClassId, Blocks, Count);
    Cache.deallocate(Primary::SizeClassMap::BatchClassId, B);
  }
  Cache.destroy(nullptr);
  Allocator.releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator.getStats(&Str);
  Str.output();
  EXPECT_EQ(AllocationFailed, true);
  Allocator.unmapTestOnly();
}
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  std::vector<std::pair<scudo::uptr, void *>> V;
  for (scudo::uptr I = 0; I < 64U; I++) {
    const scudo::uptr Size =
        static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
    void *P = Cache.allocate(ClassId);
    V.push_back(std::make_pair(ClassId, P));
  }
  scudo::uptr Found = 0;
  auto Lambda = [&V, &Found](scudo::uptr Block) {
    for (const auto &Pair : V) {
      if (Pair.second == reinterpret_cast<void *>(Block))
        Found++;
    }
  };
  Allocator->disable();
  Allocator->iterateOverBlocks(Lambda);
  Allocator->enable();
  EXPECT_EQ(Found, V.size());
  while (!V.empty()) {
    auto Pair = V.back();
    Cache.deallocate(Pair.first, Pair.second);
    V.pop_back();
  }
  Cache.destroy(nullptr);
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Str.output();
}
SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
  using Primary =
      TestAllocator<TypeParam, scudo::Config::Primary::SizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
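  // The worker threads all block on the condition variable until the main
  // thread sets Ready, so the 32 threads start hammering the allocator at
  // roughly the same time.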
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++) {
    Threads[I] = std::thread([&]() {
      static thread_local typename Primary::CacheT Cache;
      Cache.init(nullptr, Allocator.get());
      std::vector<std::pair<scudo::uptr, void *>> V;
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
                                 Primary::SizeClassMap::MaxSize / 4;
        const scudo::uptr ClassId =
            Primary::SizeClassMap::getClassIdBySize(Size);
        void *P = Cache.allocate(ClassId);
        if (P)
          V.push_back(std::make_pair(ClassId, P));
      }

      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Cache.deallocate(Pair.first, Pair.second);
        V.pop_back();
        // This increases the chance of having non-full TransferBatches and it
        // will jump into the code path of merging TransferBatches.
        if (std::rand() % 8 == 0)
          Cache.drain();
      }
      Cache.destroy(nullptr);
    });
  }
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  scudo::ScopedString Str;
  Allocator->getStats(&Str);
  Allocator->getFragmentationInfo(&Str);
  Str.output();
}
// Through a simple allocation that spans two pages, verify that releaseToOS
// actually releases some bytes (at least one page worth). This is a regression
// test for an error in how the release criteria were computed.
SCUDO_TYPED_TEST(ScudoPrimaryTest, ReleaseToOS) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = scudo::getPageSizeCached() * 2;
  EXPECT_TRUE(Primary::canAllocate(Size));
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
  void *P = Cache.allocate(ClassId);
  EXPECT_NE(P, nullptr);
  Cache.deallocate(ClassId, P);
  Cache.destroy(nullptr);
  EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll), 0U);
}
SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
  using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
  std::unique_ptr<Primary> Allocator(new Primary);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  typename Primary::CacheT Cache;
  Cache.init(nullptr, Allocator.get());
  const scudo::uptr Size = 32U;
  const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);

  // Allocate 4 times the group size worth of memory, then release all of it.
  // We expect the freed blocks to be classified into groups. Then allocate one
  // group size worth of memory again and expect the maximum address difference
  // between the returned blocks to be at most 2 times the group size. The
  // blocks don't need to fall within a single group because the group id is
  // derived by shifting the compact pointer, and depending on the
  // configuration the compact pointer may not be aligned to the group size.
  // As a result, the blocks can span at most two groups.
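  // For example, with GroupSizeLog == 20 the group size is 1 MiB: the peak
  // phase allocates 4 MiB in 32-byte blocks (131072 allocations) and the final
  // phase re-allocates 32768 blocks.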
  const scudo::uptr GroupSizeMem = (1ULL << Primary::GroupSizeLog);
  const scudo::uptr PeakAllocationMem = 4 * GroupSizeMem;
  const scudo::uptr PeakNumberOfAllocations = PeakAllocationMem / Size;
  const scudo::uptr FinalNumberOfAllocations = GroupSizeMem / Size;
  std::vector<scudo::uptr> Blocks;
  std::mt19937 R;

  for (scudo::uptr I = 0; I < PeakNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  std::shuffle(Blocks.begin(), Blocks.end(), R);

  // Release all the allocated blocks, including those held by local cache.
  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));
    Blocks.pop_back();
  }
  Cache.drain();

  for (scudo::uptr I = 0; I < FinalNumberOfAllocations; ++I)
    Blocks.push_back(reinterpret_cast<scudo::uptr>(Cache.allocate(ClassId)));

  EXPECT_LE(*std::max_element(Blocks.begin(), Blocks.end()) -
                *std::min_element(Blocks.begin(), Blocks.end()),
            GroupSizeMem * 2);

  while (!Blocks.empty()) {
    Cache.deallocate(ClassId, reinterpret_cast<void *>(Blocks.back()));