//===-- combined_test.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "chunk.h"
#include "combined.h"
#include "condition_variable.h"
#include "mem_map.h"
#include "size_class_map.h"

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <stdlib.h>
#include <thread>
#include <vector>

static bool UseQuarantine = true;

static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
static constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

// Fuchsia complains that the function is not used.
UNUSED static void disableDebuggerdMaybe() {
#if SCUDO_ANDROID
  // Disable the debuggerd signal handler on Android, without this we can end
  // up spending a significant amount of time creating tombstones.
  signal(SIGSEGV, SIG_DFL);
#endif
}

template <class AllocatorT>
bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  if (Alignment < MinAlignment)
    Alignment = MinAlignment;
  const scudo::uptr NeededSize =
      scudo::roundUp(Size, MinAlignment) +
      ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
  return AllocatorT::PrimaryT::canAllocate(NeededSize);
}
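
// When memory tagging is enabled, accesses just before or just after a chunk
// are expected to fault. checkMemoryTaggingMaybe() exercises both the
// left-of-chunk and right-of-chunk accesses under EXPECT_DEATH whenever the
// current configuration is expected to catch them.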
template <class AllocatorT>
void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                             scudo::uptr Alignment) {
  const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Size = scudo::roundUp(Size, MinAlignment);
  if (Allocator->useMemoryTaggingTestOnly())
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[-1] = 'A';
        },
        "");
  if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
          ? Allocator->useMemoryTaggingTestOnly()
          : Alignment == MinAlignment) {
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          reinterpret_cast<char *>(P)[Size] = 'A';
        },
        "");
  }
}

template <typename Config> struct TestAllocator : scudo::Allocator<Config> {
  TestAllocator() {
    this->initThreadMaybe();
    if (scudo::archSupportsMemoryTagging() &&
        !scudo::systemDetectsMemoryTagFaultsTestOnly())
      this->disableMemoryTagging();
  }
  ~TestAllocator() { this->unmapTestOnly(); }

  void *operator new(size_t size);
  void operator delete(void *ptr);
};

constexpr size_t kMaxAlign = std::max({
    alignof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
    alignof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
    alignof(scudo::Allocator<scudo::AndroidConfig>)
});
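
// kMaxAlign (and kMaxSize below) are computed across all the configs this
// file may instantiate, so the storage handed out by TestAllocatorStorage is
// suitable for any of them.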
#if SCUDO_FUCHSIA
// The allocator is over 4MB large. Rather than creating an instance of this on
// the heap, keep it in a global storage to reduce fragmentation from having to
// mmap this at the start of every test.
struct TestAllocatorStorage {
  static constexpr size_t kMaxSize = std::max({
      sizeof(scudo::Allocator<scudo::DefaultConfig>),
#if SCUDO_CAN_USE_PRIMARY64
      sizeof(scudo::Allocator<scudo::FuchsiaConfig>),
#endif
      sizeof(scudo::Allocator<scudo::AndroidConfig>)
  });

  // Skip the thread safety analysis here: the mutex is acquired in get() and
  // released in release(), which the analysis cannot follow.
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    CHECK(size <= kMaxSize &&
          "Allocation size doesn't fit in the allocator storage");
    // Serialize tests that share the static storage.
    M.lock();
    return AllocatorStorage;
  }

  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS {
    ASSERT_EQ(ptr, AllocatorStorage);
    M.unlock();
  }

  static scudo::HybridMutex M;
  static uint8_t AllocatorStorage[kMaxSize];
};
scudo::HybridMutex TestAllocatorStorage::M;
alignas(kMaxAlign) uint8_t TestAllocatorStorage::AllocatorStorage[kMaxSize];
#else
struct TestAllocatorStorage {
  static void *get(size_t size) NO_THREAD_SAFETY_ANALYSIS {
    void *p = nullptr;
    EXPECT_EQ(0, posix_memalign(&p, kMaxAlign, size));
    return p;
  }
  static void release(void *ptr) NO_THREAD_SAFETY_ANALYSIS { free(ptr); }
};
#endif

template <typename Config>
void *TestAllocator<Config>::operator new(size_t size) {
  return TestAllocatorStorage::get(size);
}

template <typename Config>
void TestAllocator<Config>::operator delete(void *ptr) {
  TestAllocatorStorage::release(ptr);
}

template <class TypeParam> struct ScudoCombinedTest : public Test {
  ScudoCombinedTest() {
    UseQuarantine = std::is_same<TypeParam, scudo::AndroidConfig>::value;
    Allocator = std::make_unique<AllocatorT>();
  }
  ~ScudoCombinedTest() {
    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
    UseQuarantine = true;
  }

  void BasicTest(scudo::uptr SizeLog);

  using AllocatorT = TestAllocator<TypeParam>;
  std::unique_ptr<AllocatorT> Allocator;
};
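
// Death tests reuse the same fixture; gtest only requires their suite name to
// end in "DeathTest", hence this alias.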
template <typename T> using ScudoCombinedDeathTest = ScudoCombinedTest<T>;

namespace scudo {
struct TestConditionVariableConfig {
  static const bool MaySupportMemoryTagging = true;
  template <class A>
  using TSDRegistryT =
      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.

  struct Primary {
    using SizeClassMap = scudo::AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
    static const scudo::uptr RegionSizeLog = 28U;
    typedef scudo::u32 CompactPtrT;
    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const scudo::uptr GroupSizeLog = 20U;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
#else
    static const scudo::uptr RegionSizeLog = 18U;
    static const scudo::uptr GroupSizeLog = 18U;
    typedef scudo::uptr CompactPtrT;
#endif
    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
    static const bool UseConditionVariable = true;
#if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
#else
    using ConditionVariableT = scudo::ConditionVariableDummy;
#endif
  };
#if SCUDO_CAN_USE_PRIMARY64
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;
#else
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator32<Config>;
#endif

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};
} // namespace scudo
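
// The helpers below instantiate every typed test once per allocator
// configuration: only FuchsiaConfig on Fuchsia, and DefaultConfig,
// AndroidConfig and TestConditionVariableConfig everywhere else.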
#if SCUDO_FUCHSIA
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
#else
#define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                          \
  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
#endif

#define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                             \
  using FIXTURE##NAME##_##TYPE = FIXTURE##NAME<scudo::TYPE>;                   \
  TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<scudo::TYPE>::Run(); }

#define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
  template <class TypeParam>                                                   \
  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
    void Run();                                                                \
  };                                                                           \
  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
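
// A pointer the allocator never returned must not be reported as owned,
// whether it lives in static or stack storage.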
SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
  auto *Allocator = this->Allocator.get();
  static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
  EXPECT_FALSE(
      Allocator->isOwned(&StaticBuffer[scudo::Chunk::getHeaderSize()]));

  scudo::u8 StackBuffer[scudo::Chunk::getHeaderSize() + 1];
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    StackBuffer[I] = 0x42U;
  EXPECT_FALSE(Allocator->isOwned(&StackBuffer[scudo::Chunk::getHeaderSize()]));
  for (scudo::uptr I = 0; I < sizeof(StackBuffer); I++)
    EXPECT_EQ(StackBuffer[I], 0x42U);
}

template <class Config>
void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
  auto *Allocator = this->Allocator.get();

  // This allocates and deallocates a bunch of chunks, with a wide range of
  // sizes and alignments, with a focus on sizes that could trigger weird
  // behaviors (plus or minus a small delta of a power of two for example).
  for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
    const scudo::uptr Align = 1U << AlignLog;
    for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
      if ((1LL << SizeLog) + Delta < 0)
        continue;
      const scudo::uptr Size =
          static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
      void *P = Allocator->allocate(Size, Origin, Align);
      EXPECT_NE(P, nullptr);
      EXPECT_TRUE(Allocator->isOwned(P));
      EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
      EXPECT_LE(Size, Allocator->getUsableSize(P));
      memset(P, 0xaa, Size);
      checkMemoryTaggingMaybe(Allocator, P, Size, Align);
      Allocator->deallocate(P, Origin, Size);
    }
  }

  Allocator->printStats();
  Allocator->printFragmentationInfo();
}

#define SCUDO_MAKE_BASIC_TEST(SizeLog)                                         \
  SCUDO_TYPED_TEST(ScudoCombinedDeathTest, BasicCombined##SizeLog) {           \
    this->BasicTest(SizeLog);                                                  \
  }
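
// Instantiate the basic allocate/deallocate test for every size log from 0
// (1 byte) up to 20 (1 MiB).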
SCUDO_MAKE_BASIC_TEST(0)
SCUDO_MAKE_BASIC_TEST(1)
SCUDO_MAKE_BASIC_TEST(2)
SCUDO_MAKE_BASIC_TEST(3)
SCUDO_MAKE_BASIC_TEST(4)
SCUDO_MAKE_BASIC_TEST(5)
SCUDO_MAKE_BASIC_TEST(6)
SCUDO_MAKE_BASIC_TEST(7)
SCUDO_MAKE_BASIC_TEST(8)
SCUDO_MAKE_BASIC_TEST(9)
SCUDO_MAKE_BASIC_TEST(10)
SCUDO_MAKE_BASIC_TEST(11)
SCUDO_MAKE_BASIC_TEST(12)
SCUDO_MAKE_BASIC_TEST(13)
SCUDO_MAKE_BASIC_TEST(14)
SCUDO_MAKE_BASIC_TEST(15)
SCUDO_MAKE_BASIC_TEST(16)
SCUDO_MAKE_BASIC_TEST(17)
SCUDO_MAKE_BASIC_TEST(18)
SCUDO_MAKE_BASIC_TEST(19)
SCUDO_MAKE_BASIC_TEST(20)

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroContents) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroContents returns a zero'd out block.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying ZeroFill returns a zero'd out block.
  Allocator->setFillContents(scudo::ZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++)
        ASSERT_EQ((reinterpret_cast<char *>(P))[I], '\0');
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, PatternOrZeroFill) {
  auto *Allocator = this->Allocator.get();

  // Ensure that specifying PatternOrZeroFill returns a pattern or zero filled
  // block. The primary allocator only produces pattern filled blocks if MTE
  // is disabled, so we only require pattern filled blocks in that case.
  Allocator->setFillContents(scudo::PatternOrZeroFill);
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, false);
      EXPECT_NE(P, nullptr);
      for (scudo::uptr I = 0; I < Size; I++) {
        unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
        if (isPrimaryAllocation<TestAllocator<TypeParam>>(Size,
                                                          1U << MinAlignLog) &&
            !Allocator->useMemoryTaggingTestOnly())
          ASSERT_EQ(V, scudo::PatternFillByte);
        else
          ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
      }
      memset(P, 0xaa, Size);
      Allocator->deallocate(P, Origin, Size);
    }
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, BlockReuse) {
  auto *Allocator = this->Allocator.get();

  // Verify that a chunk will end up being reused, at some point.
  const scudo::uptr NeedleSize = 1024U;
  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
  Allocator->deallocate(NeedleP, Origin);
  bool Found = false;
  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
    void *P = Allocator->allocate(NeedleSize, Origin);
    if (Allocator->getHeaderTaggedPointer(P) ==
        Allocator->getHeaderTaggedPointer(NeedleP))
      Found = true;
    Allocator->deallocate(P, Origin);
  }
  EXPECT_TRUE(Found);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeIncreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a chunk all the way up to a secondary allocation, verifying that
  // we preserve the data in the process.
  scudo::uptr Size = 16;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, Size);
  while (Size < TypeParam::Primary::SizeClassMap::MaxSize * 4) {
    void *NewP = Allocator->reallocate(P, Size * 2);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < Size; J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    memset(reinterpret_cast<char *>(NewP) + Size, Marker, Size);
    Size *= 2U;
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateLargeDecreasing) {
  auto *Allocator = this->Allocator.get();

  // Reallocate a large chunk all the way down to a byte, verifying that we
  // preserve the data in the process.
  scudo::uptr Size = TypeParam::Primary::SizeClassMap::MaxSize * 2;
  const scudo::uptr DataSize = 2048U;
  void *P = Allocator->allocate(Size, Origin);
  const char Marker = 'A';
  memset(P, Marker, scudo::Min(Size, DataSize));
  while (Size > 1U) {
    Size /= 2U;
    void *NewP = Allocator->reallocate(P, Size);
    EXPECT_NE(NewP, nullptr);
    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
    P = NewP;
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
  auto *Allocator = this->Allocator.get();

  // Check that reallocating a chunk to a slightly smaller or larger size
  // returns the same chunk. This requires that all the sizes we iterate on use
  // the same block size, but that should be the case for MaxSize - 64 with our
  // default class size maps.
  constexpr scudo::uptr ReallocSize =
      TypeParam::Primary::SizeClassMap::MaxSize - 64;
  void *P = Allocator->allocate(ReallocSize, Origin);
  const char Marker = 'A';
  memset(P, Marker, ReallocSize);
  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
    const scudo::uptr NewSize =
        static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta);
    void *NewP = Allocator->reallocate(P, NewSize);
    EXPECT_EQ(NewP, P);
    for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
    checkMemoryTaggingMaybe(Allocator, NewP, NewSize, 0);
  }
  Allocator->deallocate(P, Origin);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
  auto *Allocator = this->Allocator.get();
  // Allocates a bunch of chunks, then iterate over all the chunks, ensuring
  // they are the ones we allocated. This requires the allocator to not have any
  // other allocated chunk at this point (eg: won't work with the Quarantine).
  // FIXME: Make it work with UseQuarantine and tagging enabled. Internals of
  // iterateOverChunks reads headers by tagged and non-tagged pointers so one
  // of them will fail.
  if (!UseQuarantine) {
    std::vector<void *> V;
    for (scudo::uptr I = 0; I < 64U; I++)
      V.push_back(Allocator->allocate(
          static_cast<scudo::uptr>(std::rand()) %
              (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
          Origin));
    Allocator->disable();
    Allocator->iterateOverChunks(
        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
        [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
          void *P = reinterpret_cast<void *>(Base);
          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
        },
        reinterpret_cast<void *>(&V));
    Allocator->enable();
    for (auto P : V)
      Allocator->deallocate(P, Origin);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, UseAfterFree) {
  auto *Allocator = this->Allocator.get();

  // Check that use-after-free is detected.
  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
    const scudo::uptr Size = 1U << SizeLog;
    if (!Allocator->useMemoryTaggingTestOnly())
      continue;
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[0] = 'A';
        },
        "");
    EXPECT_DEATH(
        {
          disableDebuggerdMaybe();
          void *P = Allocator->allocate(Size, Origin);
          Allocator->deallocate(P, Origin);
          reinterpret_cast<char *>(P)[Size - 1] = 'A';
        },
        "");
  }
}

SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
  auto *Allocator = this->Allocator.get();

  if (Allocator->useMemoryTaggingTestOnly()) {
    // Check that disabling memory tagging works correctly.
    void *P = Allocator->allocate(2048, Origin);
    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 'A', "");
    scudo::ScopedDisableMemoryTagChecks NoTagChecks;
    Allocator->disableMemoryTagging();
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    P = Allocator->allocate(2048, Origin);
    EXPECT_EQ(scudo::untagPointer(P), P);
    reinterpret_cast<char *>(P)[2048] = 'A';
    Allocator->deallocate(P, Origin);

    Allocator->releaseToOS(scudo::ReleaseToOS::Force);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
  auto *Allocator = this->Allocator.get();

  scudo::uptr BufferSize = 8192;
  std::vector<char> Buffer(BufferSize);
  scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  while (ActualSize > BufferSize) {
    BufferSize = ActualSize + 1024;
    Buffer.resize(BufferSize);
    ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
  }
  std::string Stats(Buffer.begin(), Buffer.end());
  // Basic checks on the contents of the statistics output, which also allows
  // us to verify that we got it all.
  EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
  EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
}
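
// The two cache-drain tests below reach into the TSD and its local cache
// directly, so Clang's thread safety analysis is disabled for them.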
SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_TRUE(!TSD->getCache().isEmpty());
  TSD->getCache().drain();
  EXPECT_TRUE(TSD->getCache().isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 64U; I++)
    V.push_back(Allocator->allocate(
        static_cast<scudo::uptr>(std::rand()) %
            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
        Origin));
  for (auto P : V)
    Allocator->deallocate(P, Origin);

  // `ForceAll` will also drain the caches.
  Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  EXPECT_TRUE(TSD->getCache().isEmpty());
  EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
  EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
  if (UnlockRequired)
    TSD->unlock();
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
  std::mutex Mutex;
  std::condition_variable Cv;
  bool Ready = false;
  auto *Allocator = this->Allocator.get();
  std::thread Threads[32];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread([&]() {
      {
        std::unique_lock<std::mutex> Lock(Mutex);
        while (!Ready)
          Cv.wait(Lock);
      }
      std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
        void *P = Allocator->allocate(Size, Origin);
        // A region could have run out of memory, resulting in a null P.
        if (P)
          V.push_back(std::make_pair(P, Size));
      }

      // Try to interleave pushBlocks(), popBatch() and releaseToOS().
      Allocator->releaseToOS(scudo::ReleaseToOS::Force);

      while (!V.empty()) {
        auto Pair = V.back();
        Allocator->deallocate(Pair.first, Origin, Pair.second);
        V.pop_back();
      }
    });
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

// Test that multiple instantiations of the allocator have not messed up the
// process's signal handlers (GWP-ASan used to do this).
TEST(ScudoCombinedDeathTest, SKIP_ON_FUCHSIA(testSEGV)) {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::ReservedMemoryT ReservedMemory;
  ASSERT_TRUE(ReservedMemory.create(/*Addr=*/0U, Size, "testSEGV"));
  void *P = reinterpret_cast<void *>(ReservedMemory.getBase());
  ASSERT_NE(P, nullptr);
  EXPECT_DEATH(memset(P, 0xaa, Size), "");
  ReservedMemory.release();
}
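
// DeathConfig below builds a deliberately tiny allocator: a fixed size class
// map with only a handful of classes and small regions, so the death tests
// and the FullRegion test can exhaust a region quickly.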
struct DeathSizeClassConfig {
  static const scudo::uptr NumBits = 1;
  static const scudo::uptr MinSizeLog = 10;
  static const scudo::uptr MidSizeLog = 10;
  static const scudo::uptr MaxSizeLog = 13;
  static const scudo::u16 MaxNumCachedHint = 8;
  static const scudo::uptr MaxBytesCachedLog = 12;
  static const scudo::uptr SizeDelta = 0;
};

static const scudo::uptr DeathRegionSizeLog = 21U;

struct DeathConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A>
  using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;

  struct Primary {
    // Tiny allocator, its Primary only serves chunks of four sizes.
    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
    typedef scudo::uptr CompactPtrT;
    static const scudo::uptr CompactPtrScale = 0;
    static const bool EnableRandomOffset = true;
    static const scudo::uptr MapSizeIncrement = 1UL << 18;
    static const scudo::uptr GroupSizeLog = 18;
  };
  template <typename Config>
  using PrimaryT = scudo::SizeClassAllocator64<Config>;

  struct Secondary {
    template <typename Config>
    using CacheT = scudo::MapAllocatorNoCache<Config>;
  };

  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
};

TEST(ScudoCombinedDeathTest, DeathCombined) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  const scudo::uptr Size = 1000U;
  void *P = Allocator->allocate(Size, Origin);
  EXPECT_NE(P, nullptr);

  // Invalid sized deallocation.
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");

  // Misaligned pointer. Potentially unused if EXPECT_DEATH isn't available.
  UNUSED void *MisalignedP =
      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");

  // Header corruption. The three flips cancel each other out, restoring a
  // valid header before the legitimate deallocation below.
  scudo::u64 *H =
      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
  *H ^= 0x42U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420042U;
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  *H ^= 0x420000U;

  // Invalid chunk state.
  Allocator->deallocate(P, Origin, Size);
  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
  EXPECT_DEATH(Allocator->getUsableSize(P), "");
}

// Verify that when a region gets full, the allocator will still manage to
// fulfill the allocation through a larger size class.
TEST(ScudoCombinedTest, FullRegion) {
  using AllocatorT = TestAllocator<DeathConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  std::vector<void *> V;
  scudo::uptr FailedAllocationsCount = 0;
  for (scudo::uptr ClassId = 1U;
       ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
    // Allocate enough to fill all of the regions above this one.
    const scudo::uptr MaxNumberOfChunks =
        ((1U << DeathRegionSizeLog) / Size) *
        (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
    void *P;
    for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
      P = Allocator->allocate(Size - 64U, Origin);
      if (!P)
        FailedAllocationsCount++;
      else
        V.push_back(P);
    }
    while (!V.empty()) {
      Allocator->deallocate(V.back(), Origin);
      V.pop_back();
    }
  }
  EXPECT_EQ(FailedAllocationsCount, 0U);
}

// Ensure that releaseToOS can be called prior to any other allocator
// operation without issue.
SCUDO_TYPED_TEST(ScudoCombinedTest, ReleaseToOS) {
  auto *Allocator = this->Allocator.get();
  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}
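
// With MemtagTuning set to M_MEMTAG_TUNING_BUFFER_OVERFLOW, neighboring
// chunks of the same size class are expected to carry tags of opposite
// parity, so a linear overflow into the next chunk trips a tag mismatch.
// OddEven searches for two adjacent allocations and checks exactly that.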
SCUDO_TYPED_TEST(ScudoCombinedTest, OddEven) {
  auto *Allocator = this->Allocator.get();
  Allocator->setOption(scudo::Option::MemtagTuning,
                       M_MEMTAG_TUNING_BUFFER_OVERFLOW);

  if (!Allocator->useMemoryTaggingTestOnly())
    return;

  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
    EXPECT_NE(Tag1 % 2, Tag2 % 2);
  };

  using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);

    std::set<scudo::uptr> Ptrs;
    bool Found = false;
    for (unsigned I = 0; I != 65536; ++I) {
      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
      if (Ptrs.count(P - Size)) {
        Found = true;
        CheckOddEven(P, P - Size);
        break;
      }
      if (Ptrs.count(P + Size)) {
        Found = true;
        CheckOddEven(P, P + Size);
        break;
      }
      Ptrs.insert(P);
    }
    EXPECT_TRUE(Found);
  }
}

SCUDO_TYPED_TEST(ScudoCombinedTest, DisableMemInit) {
  auto *Allocator = this->Allocator.get();

  std::vector<void *> Ptrs(65536);

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 1);

  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);

  // Test that if mem-init is disabled on a thread, calloc should still work as
  // expected. This is tricky to ensure when MTE is enabled, so this test tries
  // to exercise the relevant code on our MTE path.
  for (scudo::uptr ClassId = 1U; ClassId <= 8; ClassId++) {
    using SizeClassMap = typename TypeParam::Primary::SizeClassMap;
    const scudo::uptr Size =
        SizeClassMap::getSizeByClassId(ClassId) - scudo::Chunk::getHeaderSize();
    if (Size < 8)
      continue;
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin);
      memset(Ptrs[I], 0xaa, Size);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size - 8, Origin);
      memset(Ptrs[I], 0xbb, Size - 8);
    }
    for (unsigned I = 0; I != Ptrs.size(); ++I)
      Allocator->deallocate(Ptrs[I], Origin, Size - 8);
    for (unsigned I = 0; I != Ptrs.size(); ++I) {
      Ptrs[I] = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
      for (scudo::uptr J = 0; J < Size; ++J)
        ASSERT_EQ((reinterpret_cast<char *>(Ptrs[I]))[J], '\0');
    }
  }

  Allocator->setOption(scudo::Option::ThreadDisableMemInit, 0);
}

SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
  auto *Allocator = this->Allocator.get();

  // Regression test: make realloc-in-place happen at the very right end of a
  // mapped region.
  constexpr size_t nPtrs = 10000;
  for (scudo::uptr i = 1; i < 32; ++i) {
    scudo::uptr Size = 16 * i - 1;
    std::vector<void *> Ptrs;
    for (size_t i = 0; i < nPtrs; ++i) {
      void *P = Allocator->allocate(Size, Origin);
      P = Allocator->reallocate(P, Size + 1);
      Ptrs.push_back(P);
    }

    for (size_t i = 0; i < nPtrs; ++i)
      Allocator->deallocate(Ptrs[i], Origin);
  }
}
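
// The ring buffer records recent allocation/deallocation traces used for
// memory tagging error reports; the two tests below only sanity-check its
// address and size accessors.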
SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferSize) {
  auto *Allocator = this->Allocator.get();
  auto Size = Allocator->getRingBufferSize();
  ASSERT_GT(Size, 0u);
  EXPECT_EQ(Allocator->getRingBufferAddress()[Size - 1], '\0');
}

SCUDO_TYPED_TEST(ScudoCombinedTest, RingBufferAddress) {
  auto *Allocator = this->Allocator.get();
  auto *Addr = Allocator->getRingBufferAddress();
  EXPECT_NE(Addr, nullptr);
  EXPECT_EQ(Addr, Allocator->getRingBufferAddress());
}

#if SCUDO_CAN_USE_PRIMARY64
#if SCUDO_TRUSTY

// TrustyConfig is designed for a domain-specific allocator. Add a basic test
// which covers only simple operations and ensures the configuration is able
// to compile.
TEST(ScudoCombinedTest, BasicTrustyConfig) {
  using AllocatorT = scudo::Allocator<scudo::TrustyConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  for (scudo::uptr ClassId = 1U;
       ClassId <= scudo::TrustyConfig::SizeClassMap::LargestClassId;
       ClassId++) {
    const scudo::uptr Size =
        scudo::TrustyConfig::SizeClassMap::getSizeByClassId(ClassId);
    void *p = Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin);
    ASSERT_NE(p, nullptr);
    Allocator->deallocate(p, Origin);
  }

  bool UnlockRequired;
  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
  TSD->getCache().drain();

  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
}

#endif
#endif